diff --git a/config.local/arm64/config.sonic-aspeed b/config.local/arm64/config.sonic-aspeed
new file mode 100644
index 000000000..1323634ef
--- /dev/null
+++ b/config.local/arm64/config.sonic-aspeed
@@ -0,0 +1,105 @@
+CONFIG_ARCH_ASPEED=y
+CONFIG_NET_VENDOR_FARADAY=y
+CONFIG_FTGMAC100=y
+CONFIG_UNICODE=y
+CONFIG_BLOCK=y
+CONFIG_LBDAF=y
+CONFIG_FS_MBCACHE=y
+CONFIG_JBD2=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_USE_FOR_EXT2=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_MCTP=y
+CONFIG_SERIAL_8250_ASPEED_VUART=y
+CONFIG_SERIAL_8250_ASPEED=y
+CONFIG_SERIAL_8250_NR_UARTS=24
+CONFIG_SERIAL_8250_RUNTIME_UARTS=24
+CONFIG_CAN_ASPEED=m
+CONFIG_MDIO_DEVICE=m
+CONFIG_MDIO_BUS=m
+CONFIG_MDIO_ASPEED=m
+CONFIG_ASPEED_KCS_IPMI_BMC=m
+CONFIG_ASPEED_BT_IPMI_BMC=m
+CONFIG_SPI_ASPEED_SMC=m
+CONFIG_I2C_ASPEED=m
+CONFIG_SPI_ASPEED_TXRX=y
+CONFIG_PINCTRL_ASPEED=y
+CONFIG_PINCTRL_ASPEED_G7=y
+CONFIG_GPIO_ASPEED=y
+CONFIG_GPIO_ASPEED_SGPIO=y
+CONFIG_GPIO_ASPEED_LTPI=y
+CONFIG_SENSORS_ASPEED=y
+CONFIG_SENSORS_ASPEED_G6=m
+CONFIG_ASPEED_WATCHDOG=y
+CONFIG_MEDIA_SUPPORT=m
+CONFIG_VIDEO_DEV=m
+CONFIG_DVB_CORE=m
+CONFIG_VIDEO_V4L2_I2C=m
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_MEDIA_CONTROLLER=m
+CONFIG_V4L2_FWNODE=m
+CONFIG_V4L2_ASYNC=m
+CONFIG_DVB_NET=m
+CONFIG_DVB_MAX_ADAPTERS=16
+CONFIG_DVB_DYNAMIC_MINORS=y
+CONFIG_MEDIA_PLATFORM_DRIVERS=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_VIDEO_ASPEED=m
+CONFIG_VIDEOBUF2_CORE=m
+CONFIG_VIDEOBUF2_V4L2=m
+CONFIG_VIDEOBUF2_MEMOPS=m
+CONFIG_VIDEOBUF2_DMA_CONTIG=m
+CONFIG_VIDEOBUF2_VMALLOC=m
+CONFIG_SND_USB_AUDIO_USE_MEDIA_CONTROLLER=y
+CONFIG_USB_UHCI_PLATFORM=y
+CONFIG_USB_UHCI_SUPPORT_NON_PCI_HC=y
+CONFIG_USB_UHCI_ASPEED=y
+# CONFIG_USB_ASPEED_UDC is not set
+CONFIG_USB_ASPEED_VHUB=m
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_CQHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_OF_ASPEED=y
+# CONFIG_EDAC_ASPEED is not set
+CONFIG_EDAC_AST2700=y
+CONFIG_RTC_DRV_ASPEED=y
+CONFIG_COMMON_CLK_ASPEED=y
+CONFIG_COMMON_CLK_AST2700=y
+CONFIG_ASPEED_LPC_CTRL=y
+CONFIG_ASPEED_LPC_SNOOP=y
+CONFIG_ASPEED_UART_ROUTING=y
+CONFIG_ASPEED_P2A_CTRL=y
+CONFIG_ASPEED_SOCINFO=y
+CONFIG_ASPEED_BMC_DEV=y
+CONFIG_ASPEED_HOST_BMC_DEV=y
+CONFIG_ASPEED_SSP=y
+CONFIG_ASPEED_MCTP=y
+CONFIG_ASPEED_LPC_MAILBOX=y
+CONFIG_ASPEED_XDMA=y
+CONFIG_ASPEED_SBC=y
+CONFIG_AST2700_ESPI=y
+CONFIG_AST2700_RTC_OVER_ESPI=y
+CONFIG_ASPEED_LPC_PCC=y
+CONFIG_ASPEED_UDMA=y
+CONFIG_ASPEED_OTP=y
+CONFIG_AST2700_OTP=y
+CONFIG_ASPEED_DISP_INTF=y
+CONFIG_ASPEED_PCIE_MMBI=y
+CONFIG_ASPEED_MBOX=y
+CONFIG_ASPEED_ADC=m
+CONFIG_IIO_HWMON=m
+CONFIG_ASPEED_VIC=y
+CONFIG_ASPEED_SCU_IC=y
+CONFIG_ASPEED_I2C_IC=y
+CONFIG_ASPEED_INTC=y
+CONFIG_ASPEED_E2M_IC=y
+CONFIG_RESET_ASPEED=y
+CONFIG_SQUASHFS=y
+CONFIG_CRYPTO_ENGINE=y
+CONFIG_CRYPTO_DEV_ASPEED=y
+# CONFIG_CRYPTO_DEV_ASPEED_DEBUG is not set
+# CONFIG_CRYPTO_DEV_ASPEED_HACE is not set
+# CONFIG_CRYPTO_DEV_ASPEED_RSSS is not set
+# CONFIG_CRYPTO_DEV_ASPEED_ECDSA is not set
+
diff --git a/patches-sonic/aspeed-ast2700-support.patch b/patches-sonic/aspeed-ast2700-support.patch
new file mode 100644
index 000000000..cc9a6c492
--- /dev/null
+++ b/patches-sonic/aspeed-ast2700-support.patch
@@ -0,0 +1,88868 @@
+From: Chander
+Date: Tue, 23 Dec 2025 10:16:25 +0000
+Subject: [PATCH] Add Aspeed AST2700 support
+
+This patch adds support for the Aspeed AST2700 ARM64 SoC, including:
+- Device tree files for AST2700 and related boards
+- Aspeed-specific drivers (pinctrl, clk, soc, crypto, etc.)
+- Faraday ethernet driver support
+- Kconfig and Makefile updates
+
+Signed-off-by: Chander
+---
+diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
+--- a/arch/arm64/Kconfig.platforms	2025-08-01 08:48:47.000000000 +0000
++++ b/arch/arm64/Kconfig.platforms	2025-12-23 10:16:21.331029200 +0000
+@@ -40,6 +40,12 @@
+ 	  This enables support for Apple's in-house ARM SoC family, starting
+ 	  with the Apple M1.
+ 
++config ARCH_ASPEED
++	bool "Aspeed SoC family"
++	help
++	  Say yes if you intend to run on an Aspeed AST2700 or other
++	  seventh-generation Aspeed BMC.
++
+ menuconfig ARCH_BCM
+ 	bool "Broadcom SoC Support"
+ 
+diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile
+--- a/arch/arm64/boot/dts/Makefile	2025-08-01 08:48:47.000000000 +0000
++++ b/arch/arm64/boot/dts/Makefile	2025-12-23 10:16:21.337029100 +0000
+@@ -9,6 +9,7 @@
+ subdir-y += apm
+ subdir-y += apple
+ subdir-y += arm
++subdir-y += aspeed
+ subdir-y += bitmain
+ subdir-y += broadcom
+ subdir-y += cavium
+diff --git a/arch/arm64/boot/dts/aspeed/Makefile b/arch/arm64/boot/dts/aspeed/Makefile
+--- a/arch/arm64/boot/dts/aspeed/Makefile	1970-01-01 00:00:00.000000000 +0000
++++ b/arch/arm64/boot/dts/aspeed/Makefile	2025-12-23 10:16:06.861271783 +0000
+@@ -0,0 +1,16 @@
++# SPDX-License-Identifier: GPL-2.0
++
++dtb-$(CONFIG_ARCH_ASPEED) += \
++	ast2700-evb.dtb \
++	ast2700-raw.dtb \
++	ast2700-evb-s0.dtb \
++	ast2700-evb-s1.dtb \
++	ast2700-ncsi.dtb \
++	ast2700-dcscm.dtb \
++	ast2700-dcscm_ast1700-evb.dtb \
++	ast2700-dcscm_ast1700-evb-dual.dtb \
++	ast2700-dcscm_ast1800-evb.dtb \
++	ast2700-evb-256-abr.dtb \
++	ast2700-slt.dtb \
++	ast2700-fpga.dtb \
++	ast2700-ci-host.dtb
+diff --git a/arch/arm64/boot/dts/aspeed/aspeed-evb-flash-layout-128.dtsi b/arch/arm64/boot/dts/aspeed/aspeed-evb-flash-layout-128.dtsi
+--- a/arch/arm64/boot/dts/aspeed/aspeed-evb-flash-layout-128.dtsi	1970-01-01 00:00:00.000000000 +0000
++++ b/arch/arm64/boot/dts/aspeed/aspeed-evb-flash-layout-128.dtsi	2025-12-23 10:16:06.861271783 +0000
+@@ -0,0 +1,32 @@
++// SPDX-License-Identifier: GPL-2.0+
++
++partitions {
++	compatible = "fixed-partitions";
++	#address-cells = <1>;
++	#size-cells = <1>;
++
++	u-boot@0 {
++		reg = <0x0 0x400000>; // 4MB
++		label = "u-boot";
++	};
++
++	u-boot-env@400000 {
++		reg = <0x400000 0x20000>; // 128KB
++		label = "u-boot-env";
++	};
++
++	kernel@420000 {
++		reg = <0x420000 0x900000>; // 9MB
++		label = "kernel";
++	};
++
++	rofs@d20000 {
++		reg = <0xd20000 0x52E0000>; // 82.875MB
++		label = "rofs";
++	};
++
++	rwfs@6000000 {
++		reg = <0x6000000 0x2000000>; // 32MB
++		label = "rwfs";
++	};
++};
+diff --git a/arch/arm64/boot/dts/aspeed/aspeed-evb-flash-layout-256-abr.dtsi b/arch/arm64/boot/dts/aspeed/aspeed-evb-flash-layout-256-abr.dtsi
+--- a/arch/arm64/boot/dts/aspeed/aspeed-evb-flash-layout-256-abr.dtsi	1970-01-01 00:00:00.000000000 +0000
++++ b/arch/arm64/boot/dts/aspeed/aspeed-evb-flash-layout-256-abr.dtsi	2025-12-23 10:16:06.861271783 +0000
+@@ -0,0 +1,57 @@
++// SPDX-License-Identifier: GPL-2.0+
++
++partitions {
++	compatible = "fixed-partitions";
++	#address-cells = <1>;
++	#size-cells = <1>;
++
++	u-boot@0 {
++		reg = <0x0 0x400000>; // 4MB
++		label = "u-boot-a";
++	};
++
++	u-boot-env@400000 {
++		reg = <0x400000 0x20000>; // 128KB
++		label = "u-boot-env-a";
++	};
++
++	kernel@420000 {
++		reg = <0x420000 0x900000>; // 9MB
++		label = "kernel-a";
++	};
++
++	rofs@d20000 {
++		reg = <0xd20000 0x52E0000>; // 82.875MB
++		label = "rofs-a";
++	};
++
++	rwfs@6000000 {
++		reg = <0x6000000 0x2000000>;
// 32MB ++ label = "rwfs-a"; ++ }; ++ ++ u-boot@8000000 { ++ reg = <0x8000000 0x400000>; // 4MB ++ label = "u-boot-b"; ++ }; ++ ++ u-boot-env@8200000 { ++ reg = <0x8400000 0x20000>; // 128KB ++ label = "u-boot-env-b"; ++ }; ++ ++ kernel@8420000 { ++ reg = <0x8420000 0x900000>; // 9MB ++ label = "kernel-b"; ++ }; ++ ++ rofs@8d20000 { ++ reg = <0x8d20000 0x52E0000>; // 82.875MB ++ label = "rofs-b"; ++ }; ++ ++ rwfs@E000000 { ++ reg = <0xE000000 0x2000000>; // 32MB ++ label = "rwfs-b"; ++ }; ++}; +diff --git a/arch/arm64/boot/dts/aspeed/aspeed-g7-pinctrl.dtsi b/arch/arm64/boot/dts/aspeed/aspeed-g7-pinctrl.dtsi +--- a/arch/arm64/boot/dts/aspeed/aspeed-g7-pinctrl.dtsi 1970-01-01 00:00:00.000000000 +0000 ++++ b/arch/arm64/boot/dts/aspeed/aspeed-g7-pinctrl.dtsi 2025-12-23 10:16:06.861271783 +0000 +@@ -0,0 +1,1425 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++// Copyright 2025 ASPEED Corp. ++ ++&pinctrl0 { ++ pinctrl_emmc_default: emmc-default { ++ function = "EMMC"; ++ groups = "EMMCG1"; ++ }; ++ ++ pinctrl_emmcg4_default: emmc-default { ++ function = "EMMC"; ++ groups = "EMMCG4"; ++ }; ++ ++ pinctrl_emmcg8_default: emmcg8-default { ++ function = "EMMC"; ++ groups = "EMMCG8"; ++ }; ++ ++ pinctrl_emmcwpn_default: emmcwpn-default { ++ function = "EMMC"; ++ groups = "EMMCWPN"; ++ }; ++ ++ pinctrl_emmccdn_default: emmccdn-default { ++ function = "EMMC"; ++ groups = "EMMCCDN"; ++ }; ++ ++ pinctrl_vgaddc_default: vgaddc-default { ++ function = "VGADDC"; ++ groups = "VGADDC"; ++ }; ++ ++ pinctrl_usb3axhd_default: usb3axhd-default { ++ function = "USB3A"; ++ groups = "USB3AXHD"; ++ }; ++ ++ pinctrl_usb3axhpd_default: usb3axhpd-default { ++ function = "USB3A"; ++ groups = "USB3AXHPD"; ++ }; ++ ++ pinctrl_usb3axh_default: usb3axh-default { ++ function = "USB3A"; ++ groups = "USB3AXH"; ++ }; ++ ++ pinctrl_usb3axhp_default: usb3axhp-default { ++ function = "USB3A"; ++ groups = "USB3AXHP"; ++ }; ++ ++ pinctrl_usb3axh2b_default: usb3axh2b-default { ++ function = "USB3A"; ++ groups = "USB3AXH2B"; ++ }; ++ ++ pinctrl_usb3axhp2b_default: usb3axhp2b-default { ++ function = "USB3A"; ++ groups = "USB3AXHP2B"; ++ }; ++ ++ pinctrl_usb2axhd1_default: usb2axhd1-default { ++ function = "USB2A"; ++ groups = "USB2AXHD1"; ++ }; ++ ++ pinctrl_usb2axhpd1_default: usb2axhpd1-default { ++ function = "USB2A"; ++ groups = "USB2AXHPD1"; ++ }; ++ ++ pinctrl_usb2ad1_default: usb2ad1-default { ++ function = "USB2A"; ++ groups = "USB2AD1"; ++ }; ++ ++ pinctrl_usb2axh_default: usb2axh-default { ++ function = "USB2A"; ++ groups = "USB2AXH"; ++ }; ++ ++ pinctrl_usb2axhp_default: usb2axhp-default { ++ function = "USB2A"; ++ groups = "USB2AXHP"; ++ }; ++ ++ pinctrl_usb2axh2b_default: usb2axh2b-default { ++ function = "USB2A"; ++ groups = "USB2AXH2B"; ++ }; ++ ++ pinctrl_usb2axhp2b_default: usb2axhp2b-default { ++ function = "USB2A"; ++ groups = "USB2AXHP2B"; ++ }; ++ ++ pinctrl_usb2ahpd0_default: usb2ahpd0-default { ++ function = "USB2A"; ++ groups = "USB2AHPD0"; ++ }; ++ ++ pinctrl_usb2ad0_default: usb2ad0-default { ++ function = "USB2A"; ++ groups = "USB2AD0"; ++ }; ++ ++ pinctrl_usb2ah_default: usb2ah-default { ++ function = "USB2A"; ++ groups = "USB2AH"; ++ }; ++ ++ pinctrl_usb2ahp_default: usb2ahp-default { ++ function = "USB2A"; ++ groups = "USB2AHP"; ++ }; ++ ++ pinctrl_usb3bxhd_default: usb3bxhd-default { ++ function = "USB3B"; ++ groups = "USB3BXHD"; ++ }; ++ ++ pinctrl_usb3bxhpd_default: usb3bxhpd-default { ++ function = "USB3B"; ++ groups = "USB3BXHPD"; ++ }; ++ ++ pinctrl_usb3bxh_default: usb3bxh-default { ++ function = 
"USB3B"; ++ groups = "USB3BXH"; ++ }; ++ ++ pinctrl_usb3bxhp_default: usb3bxhp-default { ++ function = "USB3B"; ++ groups = "USB3BXHP"; ++ }; ++ ++ pinctrl_usb3bxh2a_default: usb3bxh2a-default { ++ function = "USB3B"; ++ groups = "USB3BXH2A"; ++ }; ++ ++ pinctrl_usb3bxhp2a_default: usb3bxhp2a-default { ++ function = "USB3B"; ++ groups = "USB3BXHP2A"; ++ }; ++ ++ pinctrl_usb2bxhd1_default: usb2bxhd1-default { ++ function = "USB2B"; ++ groups = "USB2BXHD1"; ++ }; ++ ++ pinctrl_usb2bxhpd1_default: usb2bxhpd1-default { ++ function = "USB2B"; ++ groups = "USB2BXHPD1"; ++ }; ++ ++ pinctrl_usb2bd1_default: usb2bd1-default { ++ function = "USB2B"; ++ groups = "USB2BD1"; ++ }; ++ ++ pinctrl_usb2bxh_default: usb2bxh-default { ++ function = "USB2B"; ++ groups = "USB2BXH"; ++ }; ++ ++ pinctrl_usb2bxhp_default: usb2bxhp-default { ++ function = "USB2B"; ++ groups = "USB2BXHP"; ++ }; ++ ++ pinctrl_usb2bxh2a_default: usb2bxh2a-default { ++ function = "USB2B"; ++ groups = "USB2BXH2A"; ++ }; ++ ++ pinctrl_usb2bxhp2a_default: usb2bxhp2a-default { ++ function = "USB2B"; ++ groups = "USB2BXHP2A"; ++ }; ++ ++ pinctrl_usb2bhpd0_default: usb2bhpd0-default { ++ function = "USB2B"; ++ groups = "USB2BHPD0"; ++ }; ++ ++ pinctrl_usb2bd0_default: usb2bd0-default { ++ function = "USB2B"; ++ groups = "USB2BD0"; ++ }; ++ ++ pinctrl_usb2bh_default: usb2bh-default { ++ function = "USB2B"; ++ groups = "USB2BH"; ++ }; ++ ++ pinctrl_usb2bhp_default: usb2bhp-default { ++ function = "USB2B"; ++ groups = "USB2BHP"; ++ }; ++ ++ pinctrl_jtagm0_default: jtagm0-default { ++ function = "JTAG0"; ++ groups = "JTAGM0"; ++ }; ++ ++ pinctrl_jtag_psp_default: jtag-psp-default { ++ function = "JTAG0"; ++ groups = "PSP"; ++ }; ++ ++ pinctrl_jtag_ssp_default: jtag-ssp-default { ++ function = "JTAG0"; ++ groups = "SSP"; ++ }; ++ ++ pinctrl_jtag_tsp_default: jtag-tsp-default { ++ function = "JTAG0"; ++ groups = "TSP"; ++ }; ++ ++ pinctrl_jtag_ddr_default: jtag-ddr-default { ++ function = "JTAG0"; ++ groups = "DDR"; ++ }; ++ ++ pinctrl_jtag_usb3a_default: jtag-usb3a-default { ++ function = "JTAG0"; ++ groups = "USB3A"; ++ }; ++ ++ pinctrl_jtag_usb3b_default: jtag-usb3b-default { ++ function = "JTAG0"; ++ groups = "USB3B"; ++ }; ++ ++ pinctrl_jtag_pciea_default: jtag-pciea-default { ++ function = "JTAG0"; ++ groups = "PCIEA"; ++ }; ++ ++ pinctrl_jtag_pcieb_default: jtag-pcieb-default { ++ function = "JTAG0"; ++ groups = "PCIEB"; ++ }; ++ ++ pinctrl_pcierc0_perst_default: pcierc0-perst-default { ++ function = "PCIERC"; ++ groups = "PCIERC0PERST"; ++ }; ++ ++ pinctrl_pcierc1_perst_default: pcierc1-perst-default { ++ function = "PCIERC"; ++ groups = "PCIERC1PERST"; ++ }; ++}; ++ ++&pinctrl1 { ++ pinctrl_sgpm0_default: sgpm0-default { ++ function = "SGPM0"; ++ groups = "SGPM0"; ++ }; ++ ++ pinctrl_dsgpm0_default: dsgpm0-default { ++ function = "SGPM0"; ++ groups = "DSGPM0"; ++ }; ++ ++ pinctrl_sgpm1_default: sgpm1-default { ++ function = "SGPM1"; ++ groups = "SGPM1"; ++ }; ++ ++ pinctrl_sgps_default: sgps-default { ++ function = "SGPS"; ++ groups = "SGPS"; ++ }; ++ ++ pinctrl_adc0_default: adc0-default { ++ function = "ADC0"; ++ groups = "ADC0"; ++ }; ++ ++ pinctrl_adc1_default: adc1-default { ++ function = "ADC1"; ++ groups = "ADC1"; ++ }; ++ ++ pinctrl_adc2_default: adc2-default { ++ function = "ADC2"; ++ groups = "ADC2"; ++ }; ++ ++ pinctrl_adc3_default: adc3-default { ++ function = "ADC3"; ++ groups = "ADC3"; ++ }; ++ ++ pinctrl_adc4_default: adc4-default { ++ function = "ADC4"; ++ groups = "ADC4"; ++ }; ++ ++ pinctrl_adc5_default: adc5-default { 
++ function = "ADC5"; ++ groups = "ADC5"; ++ }; ++ ++ pinctrl_adc6_default: adc6-default { ++ function = "ADC6"; ++ groups = "ADC6"; ++ }; ++ ++ pinctrl_adc7_default: adc7-default { ++ function = "ADC7"; ++ groups = "ADC7"; ++ }; ++ ++ pinctrl_adc8_default: adc8-default { ++ function = "ADC8"; ++ groups = "ADC8"; ++ }; ++ ++ pinctrl_adc9_default: adc9-default { ++ function = "ADC9"; ++ groups = "ADC9"; ++ }; ++ ++ pinctrl_adc10_default: adc10-default { ++ function = "ADC10"; ++ groups = "ADC10"; ++ }; ++ ++ pinctrl_adc11_default: adc11-default { ++ function = "ADC11"; ++ groups = "ADC11"; ++ }; ++ ++ pinctrl_adc12_default: adc12-default { ++ function = "ADC12"; ++ groups = "ADC12"; ++ }; ++ ++ pinctrl_adc13_default: adc13-default { ++ function = "ADC13"; ++ groups = "ADC13"; ++ }; ++ ++ pinctrl_adc14_default: adc14-default { ++ function = "ADC14"; ++ groups = "ADC14"; ++ }; ++ ++ pinctrl_adc15_default: adc15-default { ++ function = "ADC15"; ++ groups = "ADC15"; ++ }; ++ ++ pinctrl_pwm0_default: pwm0-default { ++ function = "PWM0"; ++ groups = "PWM0"; ++ }; ++ ++ pinctrl_pwm1_default: pwm1-default { ++ function = "PWM1"; ++ groups = "PWM1"; ++ }; ++ ++ pinctrl_pwm2_default: pwm2-default { ++ function = "PWM2"; ++ groups = "PWM2"; ++ }; ++ ++ pinctrl_pwm3_default: pwm3-default { ++ function = "PWM3"; ++ groups = "PWM3"; ++ }; ++ ++ pinctrl_pwm4_default: pwm4-default { ++ function = "PWM4"; ++ groups = "PWM4"; ++ }; ++ ++ pinctrl_pwm5_default: pwm5-default { ++ function = "PWM5"; ++ groups = "PWM5"; ++ }; ++ ++ pinctrl_pwm6_default: pwm6-default { ++ function = "PWM6"; ++ groups = "PWM6"; ++ }; ++ ++ pinctrl_pwm7_default: pwm7-default { ++ function = "PWM7"; ++ groups = "PWM7"; ++ }; ++ ++ pinctrl_pwm8_default: pwm8-default { ++ function = "PWM8"; ++ groups = "PWM8"; ++ }; ++ ++ pinctrl_pwm9_default: pwm9-default { ++ function = "PWM9"; ++ groups = "PWM9"; ++ }; ++ ++ pinctrl_pwm10_default: pwm10-default { ++ function = "PWM10"; ++ groups = "PWM10"; ++ }; ++ ++ pinctrl_pwm11_default: pwm11-default { ++ function = "PWM11"; ++ groups = "PWM11"; ++ }; ++ ++ pinctrl_pwm12_default: pwm12-default { ++ function = "PWM12"; ++ groups = "PWM12"; ++ }; ++ ++ pinctrl_pwm13_default: pwm13-default { ++ function = "PWM13"; ++ groups = "PWM13"; ++ }; ++ ++ pinctrl_pwm14_default: pwm14-default { ++ function = "PWM14"; ++ groups = "PWM14"; ++ }; ++ ++ pinctrl_pwm15_default: pwm15-default { ++ function = "PWM15"; ++ groups = "PWM15"; ++ }; ++ ++ pinctrl_tach0_default: tach0-default { ++ function = "TACH0"; ++ groups = "TACH0"; ++ }; ++ ++ pinctrl_tach1_default: tach1-default { ++ function = "TACH1"; ++ groups = "TACH1"; ++ }; ++ ++ pinctrl_tach2_default: tach2-default { ++ function = "TACH2"; ++ groups = "TACH2"; ++ }; ++ ++ pinctrl_tach3_default: tach3-default { ++ function = "TACH3"; ++ groups = "TACH3"; ++ }; ++ ++ pinctrl_tach4_default: tach4-default { ++ function = "TACH4"; ++ groups = "TACH4"; ++ }; ++ ++ pinctrl_tach5_default: tach5-default { ++ function = "TACH5"; ++ groups = "TACH5"; ++ }; ++ ++ pinctrl_tach6_default: tach6-default { ++ function = "TACH6"; ++ groups = "TACH6"; ++ }; ++ ++ pinctrl_tach7_default: tach7-default { ++ function = "TACH7"; ++ groups = "TACH7"; ++ }; ++ ++ pinctrl_tach8_default: tach8-default { ++ function = "TACH8"; ++ groups = "TACH8"; ++ }; ++ ++ pinctrl_tach9_default: tach9-default { ++ function = "TACH9"; ++ groups = "TACH9"; ++ }; ++ ++ pinctrl_tach10_default: tach10-default { ++ function = "TACH10"; ++ groups = "TACH10"; ++ }; ++ ++ pinctrl_tach11_default: 
tach11-default { ++ function = "TACH11"; ++ groups = "TACH11"; ++ }; ++ ++ pinctrl_tach12_default: tach12-default { ++ function = "TACH12"; ++ groups = "TACH12"; ++ }; ++ ++ pinctrl_tach13_default: tach13-default { ++ function = "TACH13"; ++ groups = "TACH13"; ++ }; ++ ++ pinctrl_tach14_default: tach14-default { ++ function = "TACH14"; ++ groups = "TACH14"; ++ }; ++ ++ pinctrl_tach15_default: tach15-default { ++ function = "TACH15"; ++ groups = "TACH15"; ++ }; ++ ++ pinctrl_jtagm1_default: jtagm1-default { ++ function = "JTAGM1"; ++ groups = "JTAGM1"; ++ }; ++ ++ pinctrl_mdio0_default: mdio0-default { ++ function = "MDIO0"; ++ groups = "MDIO0"; ++ }; ++ ++ pinctrl_mdio1_default: mdio1-default { ++ function = "MDIO1"; ++ groups = "MDIO1"; ++ }; ++ ++ pinctrl_mdio2_default: mdio2-default { ++ function = "MDIO2"; ++ groups = "MDIO2"; ++ }; ++ ++ pinctrl_rgmii0_default: rgmii0-default { ++ function = "RGMII0"; ++ groups = "RGMII0"; ++ }; ++ ++ pinctrl_rgmii1_default: rgmii1-default { ++ function = "RGMII1"; ++ groups = "RGMII1"; ++ }; ++ ++ pinctrl_rmii0_default: rmii0-default { ++ function = "RMII0"; ++ groups = "RMII0"; ++ }; ++ ++ pinctrl_rmii0_rclko_default: rmii0-rclko-default { ++ function = "RMII0RCLKO"; ++ groups = "RMII0RCLKO"; ++ }; ++ ++ pinctrl_rmii1_default: rmii1-default { ++ function = "RMII1"; ++ groups = "RMII1"; ++ }; ++ ++ pinctrl_rmii1_rclko_default: rmii1-rclko-default { ++ function = "RMII1RCLKO"; ++ groups = "RMII1RCLKO"; ++ }; ++ ++ pinctrl_sgmii_default: sgmii-default { ++ function = "SGMII"; ++ groups = "SGMII"; ++ }; ++ ++ pinctrl_fwspi_quad_default: fwspi-quad-default { ++ function = "FWQSPI"; ++ groups = "FWQSPI"; ++ }; ++ ++ pinctrl_fsi0_default: fsi0-default { ++ function = "FSI0"; ++ groups = "FSI0"; ++ }; ++ ++ pinctrl_fsi1_default: fsi1-default { ++ function = "FSI1"; ++ groups = "FSI1"; ++ }; ++ ++ pinctrl_fsi2_default: fsi2-default { ++ function = "FSI2"; ++ groups = "FSI2"; ++ }; ++ ++ pinctrl_fsi3_default: fsi3-default { ++ function = "FSI3"; ++ groups = "FSI3"; ++ }; ++ ++ pinctrl_spi0_default: spi0-default { ++ function = "SPI0"; ++ groups = "SPI0"; ++ }; ++ ++ pinctrl_spi0_quad_default: spi0-quad-default { ++ function = "QSPI0"; ++ groups = "QSPI0"; ++ }; ++ ++ pinctrl_spi0_cs1_default: spi0-cs1-default { ++ function = "SPI0CS1"; ++ groups = "SPI0CS1"; ++ }; ++ ++ pinctrl_spi1_default: spi1-default { ++ function = "SPI1"; ++ groups = "SPI1"; ++ }; ++ ++ pinctrl_spi1_quad_default: spi1-quad-default { ++ function = "QSPI1"; ++ groups = "QSPI1"; ++ }; ++ ++ pinctrl_spi1_cs1_default: spi1-cs1-default { ++ function = "SPI1CS1"; ++ groups = "SPI1CS1"; ++ }; ++ ++ pinctrl_spi2_default: spi2-default { ++ function = "SPI2"; ++ groups = "SPI2"; ++ }; ++ ++ pinctrl_spi2_quad_default: spi2-quad-default { ++ function = "QSPI2"; ++ groups = "QSPI2"; ++ }; ++ ++ pinctrl_spi2_cs1_default: spi2-cs1-default { ++ function = "SPI2CS1"; ++ groups = "SPI2CS1"; ++ }; ++ ++ pinctrl_espi0_default: espi0-default { ++ function = "ESPI0"; ++ groups = "ESPI0"; ++ }; ++ ++ pinctrl_espi1_default: espi1-default { ++ function = "ESPI1"; ++ groups = "ESPI1"; ++ }; ++ ++ pinctrl_lpc0_default: lpc0-default { ++ function = "LPC0"; ++ groups = "LPC0"; ++ }; ++ ++ pinctrl_lpc1_default: lpc1-default { ++ function = "LPC1"; ++ groups = "LPC1"; ++ }; ++ ++ pinctrl_vpi_default: vpi-default { ++ function = "VPI"; ++ groups = "VPI"; ++ }; ++ ++ pinctrl_sd_default: sd-default { ++ function = "SD"; ++ groups = "SD"; ++ }; ++ ++ pinctrl_hvi3c0_default: hvi3c0-default { ++ function = "I3C0"; ++ groups 
= "HVI3C0"; ++ }; ++ ++ pinctrl_hvi3c1_default: hvi3c1-default { ++ function = "I3C1"; ++ groups = "HVI3C1"; ++ }; ++ ++ pinctrl_hvi3c2_default: hvi3c2-default { ++ function = "I3C2"; ++ groups = "HVI3C2"; ++ }; ++ ++ pinctrl_hvi3c3_default: hvi3c3-default { ++ function = "I3C3"; ++ groups = "HVI3C3"; ++ }; ++ ++ pinctrl_i3c4_default: i3c4-default { ++ function = "I3C4"; ++ groups = "I3C4"; ++ }; ++ ++ pinctrl_i3c5_default: i3c5-default { ++ function = "I3C5"; ++ groups = "I3C5"; ++ }; ++ ++ pinctrl_i3c6_default: i3c6-default { ++ function = "I3C6"; ++ groups = "I3C6"; ++ }; ++ ++ pinctrl_i3c7_default: i3c7-default { ++ function = "I3C7"; ++ groups = "I3C7"; ++ }; ++ ++ pinctrl_i3c8_default: i3c8-default { ++ function = "I3C8"; ++ groups = "I3C8"; ++ }; ++ ++ pinctrl_i3c9_default: i3c9-default { ++ function = "I3C9"; ++ groups = "I3C9"; ++ }; ++ ++ pinctrl_i3c10_default: i3c10-default { ++ function = "I3C10"; ++ groups = "I3C10"; ++ }; ++ ++ pinctrl_i3c11_default: i3c11-default { ++ function = "I3C11"; ++ groups = "I3C11"; ++ }; ++ ++ pinctrl_hvi3c12_default: hvi3c12-default { ++ function = "I3C12"; ++ groups = "HVI3C12"; ++ }; ++ ++ pinctrl_hvi3c13_default: hvi3c13-default { ++ function = "I3C13"; ++ groups = "HVI3C13"; ++ }; ++ ++ pinctrl_hvi3c14_default: hvi3c14-default { ++ function = "I3C14"; ++ groups = "HVI3C14"; ++ }; ++ ++ pinctrl_hvi3c15_default: hvi3c15-default { ++ function = "I3C15"; ++ groups = "HVI3C15"; ++ }; ++ ++ pinctrl_tach0_default: tach0-default { ++ function = "TACH0"; ++ groups = "TACH0"; ++ }; ++ ++ pinctrl_tach1_default: tach1-default { ++ function = "TACH1"; ++ groups = "TACH1"; ++ }; ++ ++ pinctrl_tach2_default: tach2-default { ++ function = "TACH2"; ++ groups = "TACH2"; ++ }; ++ ++ pinctrl_tach3_default: tach3-default { ++ function = "TACH3"; ++ groups = "TACH3"; ++ }; ++ ++ pinctrl_tach4_default: tach4-default { ++ function = "TACH4"; ++ groups = "TACH4"; ++ }; ++ ++ pinctrl_tach5_default: tach5-default { ++ function = "TACH5"; ++ groups = "TACH5"; ++ }; ++ ++ pinctrl_tach6_default: tach6-default { ++ function = "TACH6"; ++ groups = "TACH6"; ++ }; ++ ++ pinctrl_tach7_default: tach7-default { ++ function = "TACH7"; ++ groups = "TACH7"; ++ }; ++ ++ pinctrl_tach8_default: tach8-default { ++ function = "TACH8"; ++ groups = "TACH8"; ++ }; ++ ++ pinctrl_tach9_default: tach9-default { ++ function = "TACH9"; ++ groups = "TACH9"; ++ }; ++ ++ pinctrl_tach10_default: tach10-default { ++ function = "TACH10"; ++ groups = "TACH10"; ++ }; ++ ++ pinctrl_tach11_default: tach11-default { ++ function = "TACH11"; ++ groups = "TACH11"; ++ }; ++ ++ pinctrl_tach12_default: tach12-default { ++ function = "TACH12"; ++ groups = "TACH12"; ++ }; ++ ++ pinctrl_tach13_default: tach13-default { ++ function = "TACH13"; ++ groups = "TACH13"; ++ }; ++ ++ pinctrl_tach14_default: tach14-default { ++ function = "TACH14"; ++ groups = "TACH14"; ++ }; ++ ++ pinctrl_tach15_default: tach15-default { ++ function = "TACH15"; ++ groups = "TACH15"; ++ }; ++ ++ pinctrl_thru0_default: thru0-default { ++ function = "THRU0"; ++ groups = "THRU0"; ++ }; ++ ++ pinctrl_thru1_default: thru1-default { ++ function = "THRU1"; ++ groups = "THRU1"; ++ }; ++ ++ pinctrl_thru2_default: thru2-default { ++ function = "THRU2"; ++ groups = "THRU2"; ++ }; ++ ++ pinctrl_thru3_default: thru3-default { ++ function = "THRU3"; ++ groups = "THRU3"; ++ }; ++ ++ pinctrl_ncts5_default: ncts5-default { ++ function = "NCTS5"; ++ groups = "NCTS5"; ++ }; ++ ++ pinctrl_ndcd5_default: ndcd5-default { ++ function = "NDCD5"; ++ groups = 
"NDCD5"; ++ }; ++ ++ pinctrl_ndsr5_default: ndsr5-default { ++ function = "NDSR5"; ++ groups = "NDSR5"; ++ }; ++ ++ pinctrl_nri5_default: nri5-default { ++ function = "NRI5"; ++ groups = "NRI5"; ++ }; ++ ++ pinctrl_i2c0_default: i2c0-default { ++ function = "I2C0"; ++ groups = "I2C0"; ++ }; ++ ++ pinctrl_i2c1_default: i2c1-default { ++ function = "I2C1"; ++ groups = "I2C1"; ++ }; ++ ++ pinctrl_i2c2_default: i2c2-default { ++ function = "I2C2"; ++ groups = "I2C2"; ++ }; ++ ++ pinctrl_i2c3_default: i2c3-default { ++ function = "I2C3"; ++ groups = "I2C3"; ++ }; ++ ++ pinctrl_i2c4_default: i2c4-default { ++ function = "I2C4"; ++ groups = "I2C4"; ++ }; ++ ++ pinctrl_i2c5_default: i2c5-default { ++ function = "I2C5"; ++ groups = "I2C5"; ++ }; ++ ++ pinctrl_i2c6_default: i2c6-default { ++ function = "I2C6"; ++ groups = "I2C6"; ++ }; ++ ++ pinctrl_i2c7_default: i2c7-default { ++ function = "I2C7"; ++ groups = "I2C7"; ++ }; ++ ++ pinctrl_i2c8_default: i2c8-default { ++ function = "I2C8"; ++ groups = "I2C8"; ++ }; ++ ++ pinctrl_i2c9_default: i2c9-default { ++ function = "I2C9"; ++ groups = "I2C9"; ++ }; ++ ++ pinctrl_i2c10_default: i2c10-default { ++ function = "I2C10"; ++ groups = "I2C10"; ++ }; ++ ++ pinctrl_i2c11_default: i2c11-default { ++ function = "I2C11"; ++ groups = "I2C11"; ++ }; ++ ++ pinctrl_i2c12_default: i2c12-default { ++ function = "I2C12"; ++ groups = "I2C12"; ++ }; ++ ++ pinctrl_i2c13_default: i2c13-default { ++ function = "I2C13"; ++ groups = "I2C13"; ++ }; ++ ++ pinctrl_i2c14_default: i2c14-default { ++ function = "I2C14"; ++ groups = "I2C14"; ++ }; ++ ++ pinctrl_i2c15_default: i2c15-default { ++ function = "I2C15"; ++ groups = "I2C15"; ++ }; ++ ++ pinctrl_salt0_default: salt0-default { ++ function = "SALT0"; ++ groups = "SALT0"; ++ }; ++ ++ pinctrl_salt1_default: salt1-default { ++ function = "SALT1"; ++ groups = "SALT1"; ++ }; ++ ++ pinctrl_salt2_default: salt2-default { ++ function = "SALT2"; ++ groups = "SALT2"; ++ }; ++ ++ pinctrl_salt3_default: salt3-default { ++ function = "SALT3"; ++ groups = "SALT3"; ++ }; ++ ++ pinctrl_salt4_default: salt4-default { ++ function = "SALT4"; ++ groups = "SALT4"; ++ }; ++ ++ pinctrl_salt5_default: salt5-default { ++ function = "SALT5"; ++ groups = "SALT5"; ++ }; ++ ++ pinctrl_salt6_default: salt6-default { ++ function = "SALT6"; ++ groups = "SALT6"; ++ }; ++ ++ pinctrl_salt7_default: salt7-default { ++ function = "SALT7"; ++ groups = "SALT7"; ++ }; ++ ++ pinctrl_salt8_default: salt8-default { ++ function = "SALT8"; ++ groups = "SALT8"; ++ }; ++ ++ pinctrl_salt9_default: salt9-default { ++ function = "SALT9"; ++ groups = "SALT9"; ++ }; ++ ++ pinctrl_salt10_default: salt10-default { ++ function = "SALT10"; ++ groups = "SALT10"; ++ }; ++ ++ pinctrl_salt11_default: salt11-default { ++ function = "SALT11"; ++ groups = "SALT11"; ++ }; ++ ++ pinctrl_salt12_default: salt12-default { ++ function = "SALT12"; ++ groups = "SALT12"; ++ }; ++ ++ pinctrl_salt13_default: salt13-default { ++ function = "SALT13"; ++ groups = "SALT13"; ++ }; ++ ++ pinctrl_salt14_default: salt14-default { ++ function = "SALT14"; ++ groups = "SALT14"; ++ }; ++ ++ pinctrl_salt15_default: salt15-default { ++ function = "SALT15"; ++ groups = "SALT15"; ++ }; ++ ++ pinctrl_ltpipsi2c0_default: ltpipsi2c0_default { ++ function = "I2C0"; ++ groups = "LTPI_PS_I2C0"; ++ }; ++ ++ pinctrl_ltpipsi2c1_default: ltpipsi2c1_default { ++ function = "I2C1"; ++ groups = "LTPI_PS_I2C1"; ++ }; ++ ++ pinctrl_ltpipsi2c2_default: ltpipsi2c2_default { ++ function = "I2C2"; ++ groups = "LTPI_PS_I2C2"; ++ 
}; ++ ++ pinctrl_ltpipsi2c3_default: ltpipsi2c3_default { ++ function = "I2C3"; ++ groups = "LTPI_PS_I2C3"; ++ }; ++ ++ pinctrl_can_default: can-default { ++ function = "CANBUS"; ++ groups = "CANBUS"; ++ }; ++ ++ pinctrl_di2c0_default: di2c0-default { ++ function = "I2C0"; ++ groups = "DI2C0"; ++ }; ++ ++ pinctrl_di2c1_default: di2c1-default { ++ function = "I2C1"; ++ groups = "DI2C1"; ++ }; ++ ++ pinctrl_di2c2_default: di2c2-default { ++ function = "I2C2"; ++ groups = "DI2C2"; ++ }; ++ ++ pinctrl_di2c3_default: di2c3-default { ++ function = "I2C3"; ++ groups = "DI2C3"; ++ }; ++ pinctrl_di2c8_default: di2c8-default { ++ function = "I2C8"; ++ groups = "DI2C8"; ++ }; ++ ++ pinctrl_di2c9_default: di2c9-default { ++ function = "I2C9"; ++ groups = "DI2C9"; ++ }; ++ ++ pinctrl_di2c10_default: di2c10-default { ++ function = "I2C10"; ++ groups = "DI2C10"; ++ }; ++ ++ pinctrl_di2c11_default: di2c11-default { ++ function = "I2C11"; ++ groups = "DI2C11"; ++ }; ++ ++ pinctrl_di2c12_default: di2c12-default { ++ function = "I2C12"; ++ groups = "DI2C12"; ++ }; ++ ++ pinctrl_di2c13_default: di2c13-default { ++ function = "I2C13"; ++ groups = "DI2C13"; ++ }; ++ ++ pinctrl_di2c14_default: di2c14-default { ++ function = "I2C14"; ++ groups = "DI2C14"; ++ }; ++ ++ pinctrl_di2c15_default: di2c15-default { ++ function = "I2C15"; ++ groups = "DI2C15"; ++ }; ++ ++ pinctrl_ncts0_default: ncts0-default { ++ function = "UART0"; ++ groups = "NCTS0"; ++ }; ++ ++ pinctrl_ndcd0_default: ndcd0-default { ++ function = "UART0"; ++ groups = "NDCD0"; ++ }; ++ ++ pinctrl_ndsr0_default: ndsr0-default { ++ function = "UART0"; ++ groups = "NDSR0"; ++ }; ++ ++ pinctrl_nri0_default: nri0-default { ++ function = "UART0"; ++ groups = "NRI0"; ++ }; ++ ++ pinctrl_ndtr0_default: ndtr0-default { ++ function = "UART0"; ++ groups = "NDTR0"; ++ }; ++ ++ pinctrl_nrts0_default: nrts0-default { ++ function = "UART0"; ++ groups = "NRTS0"; ++ }; ++ ++ pinctrl_txd0_default: txd0-default { ++ function = "UART0"; ++ groups = "TXD0"; ++ }; ++ ++ pinctrl_rxd0_default: rxd0-default { ++ function = "UART0"; ++ groups = "RXD0"; ++ }; ++ ++ pinctrl_ncts1_default: ncts1-default { ++ function = "UART1"; ++ groups = "NCTS1"; ++ }; ++ ++ pinctrl_ndcd1_default: ndcd1-default { ++ function = "UART1"; ++ groups = "NDCD1"; ++ }; ++ ++ pinctrl_ndsr1_default: ndsr1-default { ++ function = "UART1"; ++ groups = "NDSR1"; ++ }; ++ ++ pinctrl_nri1_default: nri1-default { ++ function = "UART1"; ++ groups = "NRI1"; ++ }; ++ ++ pinctrl_ndtr1_default: ndtr1-default { ++ function = "UART1"; ++ groups = "NDTR1"; ++ }; ++ ++ pinctrl_nrts1_default: nrts1-default { ++ function = "UART1"; ++ groups = "NRTS1"; ++ }; ++ ++ pinctrl_txd1_default: txd1-default { ++ function = "UART1"; ++ groups = "TXD1"; ++ }; ++ ++ pinctrl_rxd1_default: rxd1-default { ++ function = "UART1"; ++ groups = "RXD1"; ++ }; ++ ++ pinctrl_txd2_default: txd2-default { ++ function = "UART2"; ++ groups = "TXD2"; ++ }; ++ ++ pinctrl_rxd2_default: rxd2-default { ++ function = "UART2"; ++ groups = "RXD2"; ++ }; ++ ++ pinctrl_txd3_default: txd3-default { ++ function = "UART3"; ++ groups = "TXD3"; ++ }; ++ ++ pinctrl_rxd3_default: rxd3-default { ++ function = "UART3"; ++ groups = "RXD3"; ++ }; ++ ++ pinctrl_ncts5_default: ncts5-default { ++ function = "UART5"; ++ groups = "NCTS5"; ++ }; ++ ++ pinctrl_ndcd5_default: ndcd5-default { ++ function = "UART5"; ++ groups = "NDCD5"; ++ }; ++ ++ pinctrl_ndsr5_default: ndsr5-default { ++ function = "UART5"; ++ groups = "NDSR5"; ++ }; ++ ++ pinctrl_nri5_default: nri5-default { 
++ function = "UART5"; ++ groups = "NRI5"; ++ }; ++ ++ pinctrl_ndtr5_default: ndtr5-default { ++ function = "UART5"; ++ groups = "NDTR5"; ++ }; ++ ++ pinctrl_nrts5_default: nrts5-default { ++ function = "UART5"; ++ groups = "NRTS5"; ++ }; ++ ++ pinctrl_txd5_default: txd5-default { ++ function = "UART5"; ++ groups = "TXD5"; ++ }; ++ ++ pinctrl_rxd5_default: rxd5-default { ++ function = "UART5"; ++ groups = "RXD5"; ++ }; ++ ++ pinctrl_ncts6_default: ncts6-default { ++ function = "UART6"; ++ groups = "NCTS6"; ++ }; ++ ++ pinctrl_ndcd6_default: ndcd6-default { ++ function = "UART6"; ++ groups = "NDCD6"; ++ }; ++ ++ pinctrl_ndsr6_default: ndsr6-default { ++ function = "UART6"; ++ groups = "NDSR6"; ++ }; ++ ++ pinctrl_nri6_default: nri6-default { ++ function = "UART6"; ++ groups = "NRI6"; ++ }; ++ ++ pinctrl_ndtr6_default: ndtr6-default { ++ function = "UART6"; ++ groups = "NDTR6"; ++ }; ++ ++ pinctrl_nrts6_default: nrts6-default { ++ function = "UART6"; ++ groups = "NRTS6"; ++ }; ++ ++ pinctrl_txd6_default: txd6-default { ++ function = "UART6"; ++ groups = "TXD6"; ++ }; ++ ++ pinctrl_rxd6_default: rxd6-default { ++ function = "UART6"; ++ groups = "RXD6"; ++ }; ++ ++ pinctrl_txd7_default: txd7-default { ++ function = "UART7"; ++ groups = "TXD7"; ++ }; ++ ++ pinctrl_rxd7_default: rxd7-default { ++ function = "UART7"; ++ groups = "RXD7"; ++ }; ++ ++ pinctrl_txd8_default: txd8-default { ++ function = "UART8"; ++ groups = "TXD8"; ++ }; ++ ++ pinctrl_rxd8_default: rxd8-default { ++ function = "UART8"; ++ groups = "RXD8"; ++ }; ++ ++ pinctrl_txd9_default: txd9-default { ++ function = "UART9"; ++ groups = "TXD9"; ++ }; ++ ++ pinctrl_rxd9_default: rxd9-default { ++ function = "UART9"; ++ groups = "RXD9"; ++ }; ++ ++ pinctrl_txd10_default: txd10-default { ++ function = "UART10"; ++ groups = "TXD10"; ++ }; ++ ++ pinctrl_rxd10_default: rxd10-default { ++ function = "UART10"; ++ groups = "RXD10"; ++ }; ++ ++ pinctrl_txd11_default: txd11-default { ++ function = "UART11"; ++ groups = "TXD11"; ++ }; ++ ++ pinctrl_rxd11_default: rxd11-default { ++ function = "UART11"; ++ groups = "RXD11"; ++ }; ++ ++ pinctrl_pcierc2_perst_default: pcierc2-perst-default { ++ function = "PCIERC"; ++ groups = "PE2SGRSTN"; ++ }; ++ ++ pinctrl_usb2cud_default: usb2cud-default { ++ function = "USB2C"; ++ groups = "USB2CUD"; ++ }; ++ ++ pinctrl_usb2cd_default: usb2cd-default { ++ function = "USB2C"; ++ groups = "USB2CD"; ++ }; ++ ++ pinctrl_usb2ch_default: usb2ch-default { ++ function = "USB2C"; ++ groups = "USB2CH"; ++ }; ++ ++ pinctrl_usb2cu_default: usb2cu-default { ++ function = "USB2C"; ++ groups = "USB2CU"; ++ }; ++ ++ pinctrl_usb2dd_default: usb2dd-default { ++ function = "USB2D"; ++ groups = "USB2DD"; ++ }; ++ ++ pinctrl_usb2dh_default: usb2dh-default { ++ function = "USB2D"; ++ groups = "USB2DH"; ++ }; ++ ++ pinctrl_wdtrst0n_default: wdtrst0n-default { ++ function = "WDTRST0N"; ++ groups = "WDTRST0N"; ++ }; ++ ++ pinctrl_wdtrst1n_default: wdtrst1n-default { ++ function = "WDTRST1N"; ++ groups = "WDTRST1N"; ++ }; ++ ++ pinctrl_wdtrst2n_default: wdtrst2n-default { ++ function = "WDTRST2N"; ++ groups = "WDTRST2N"; ++ }; ++ ++ pinctrl_wdtrst3n_default: wdtrst3n-default { ++ function = "WDTRST3N"; ++ groups = "WDTRST3N"; ++ }; ++ ++ pinctrl_wdtrst4n_default: wdtrst4n-default { ++ function = "WDTRST4N"; ++ groups = "WDTRST4N"; ++ }; ++ ++ pinctrl_wdtrst5n_default: wdtrst5n-default { ++ function = "WDTRST5N"; ++ groups = "WDTRST5N"; ++ }; ++ ++ pinctrl_wdtrst6n_default: wdtrst6n-default { ++ function = "WDTRST6N"; ++ groups = 
"WDTRST6N"; ++ }; ++ ++ pinctrl_wdtrst7n_default: wdtrst7n-default { ++ function = "WDTRST7N"; ++ groups = "WDTRST7N"; ++ }; ++}; +diff --git a/arch/arm64/boot/dts/aspeed/aspeed-g7.dtsi b/arch/arm64/boot/dts/aspeed/aspeed-g7.dtsi +--- a/arch/arm64/boot/dts/aspeed/aspeed-g7.dtsi 1970-01-01 00:00:00.000000000 +0000 ++++ b/arch/arm64/boot/dts/aspeed/aspeed-g7.dtsi 2025-12-23 10:16:06.862271766 +0000 +@@ -0,0 +1,2870 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++#include ++#include ++#include ++#include ++#include ++ ++/ { ++ #address-cells = <2>; ++ #size-cells = <2>; ++ interrupt-parent = <&gic>; ++ ++ aliases { ++ serial0 = &uart0; ++ serial1 = &uart1; ++ serial2 = &uart2; ++ serial3 = &uart3; ++ serial4 = &uart4; ++ serial5 = &uart5; ++ serial6 = &uart6; ++ serial7 = &uart7; ++ serial8 = &uart8; ++ serial9 = &uart9; ++ serial10 = &uart10; ++ serial11 = &uart11; ++ serial12 = &uart12; ++ serial13 = &uart13; ++ serial14 = &uart14; ++ serial15 = &vuart0; ++ serial16 = &vuart1; ++ serial17 = &vuart2; ++ serial18 = &vuart3; ++ serial19 = &pcie_vuart0; ++ serial20 = &pcie_vuart1; ++ serial21 = &pcie_vuart2; ++ serial22 = &pcie_vuart3; ++ i2c0 = &i2c0; ++ i2c1 = &i2c1; ++ i2c2 = &i2c2; ++ i2c3 = &i2c3; ++ i2c4 = &i2c4; ++ i2c5 = &i2c5; ++ i2c6 = &i2c6; ++ i2c7 = &i2c7; ++ i2c8 = &i2c8; ++ i2c9 = &i2c9; ++ i2c10 = &i2c10; ++ i2c11 = &i2c11; ++ i2c12 = &i2c12; ++ i2c13 = &i2c13; ++ i2c14 = &i2c14; ++ i2c15 = &i2c15; ++ i3c0 = &i3c0; ++ i3c1 = &i3c1; ++ i3c2 = &i3c2; ++ i3c3 = &i3c3; ++ i3c4 = &i3c4; ++ i3c5 = &i3c5; ++ i3c6 = &i3c6; ++ i3c7 = &i3c7; ++ i3c8 = &i3c8; ++ i3c9 = &i3c9; ++ i3c10 = &i3c10; ++ i3c11 = &i3c11; ++ i3c12 = &i3c12; ++ i3c13 = &i3c13; ++ i3c14 = &i3c14; ++ i3c15 = &i3c15; ++ mdio0 = &mdio0; ++ mdio1 = &mdio1; ++ mdio2 = &mdio2; ++ mctp0 = &mctp0; ++ mctp1 = &mctp1; ++ mctp2 = &mctp2; ++ video0 = &video0; ++ video1 = &video1; ++ rvas0 = &rvas0; ++ rvas1 = &rvas1; ++ xdma0 = &xdma0; ++ xdma1 = &xdma1; ++ xdma2 = &xdma2; ++ bmcdev0 = &bmc_dev0; ++ bmcdev1 = &bmc_dev1; ++ edac = &edac; ++ }; ++ ++ cpus { ++ #address-cells = <2>; ++ #size-cells = <0>; ++ ++ cpu0: cpu@0 { ++ device_type = "cpu"; ++ compatible = "arm,cortex-a35"; ++ reg = <0x0 0x0>; ++ enable-method = "psci"; ++ i-cache-size = <0x8000>; ++ i-cache-line-size = <64>; ++ i-cache-sets = <256>; ++ d-cache-size = <0x8000>; ++ d-cache-line-size = <64>; ++ d-cache-sets = <128>; ++ next-level-cache = <&l2>; ++ }; ++ ++ cpu1: cpu@1 { ++ device_type = "cpu"; ++ compatible = "arm,cortex-a35"; ++ reg = <0x0 0x1>; ++ enable-method = "psci"; ++ i-cache-size = <0x8000>; ++ i-cache-line-size = <64>; ++ i-cache-sets = <256>; ++ d-cache-size = <0x8000>; ++ d-cache-line-size = <64>; ++ d-cache-sets = <128>; ++ next-level-cache = <&l2>; ++ }; ++ ++ cpu2: cpu@2 { ++ device_type = "cpu"; ++ compatible = "arm,cortex-a35"; ++ reg = <0x0 0x2>; ++ enable-method = "psci"; ++ i-cache-size = <0x8000>; ++ i-cache-line-size = <64>; ++ i-cache-sets = <256>; ++ d-cache-size = <0x8000>; ++ d-cache-line-size = <64>; ++ d-cache-sets = <128>; ++ next-level-cache = <&l2>; ++ }; ++ ++ cpu3: cpu@3 { ++ device_type = "cpu"; ++ compatible = "arm,cortex-a35"; ++ reg = <0x0 0x3>; ++ enable-method = "psci"; ++ i-cache-size = <0x8000>; ++ i-cache-line-size = <64>; ++ i-cache-sets = <256>; ++ d-cache-size = <0x8000>; ++ d-cache-line-size = <64>; ++ d-cache-sets = <128>; ++ next-level-cache = <&l2>; ++ }; ++ ++ l2: l2-cache0 { ++ compatible = "cache"; ++ cache-level = <2>; ++ cache-unified; ++ cache-size = <0x80000>; ++ cache-line-size = <64>; ++ cache-sets 
= <1024>; ++ }; ++ }; ++ ++ firmware { ++ optee: optee { ++ compatible = "linaro,optee-tz"; ++ method = "smc"; ++ }; ++ ++ psci { ++ compatible = "arm,psci-1.0", "arm,psci-0.2"; ++ method = "smc"; ++ }; ++ }; ++ ++ reserved-memory { ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ranges; ++ ++ atf: trusted-firmware-a@430000000 { ++ reg = <0x4 0x30000000 0x0 0x80000>; ++ no-map; ++ }; ++ ++ optee_core: optee-core@430080000 { ++ reg = <0x4 0x30080000 0x0 0x1000000>; ++ no-map; ++ }; ++ }; ++ ++ arm-pmu { ++ compatible = "arm,cortex-a35-pmu"; ++ interrupts = ; ++ }; ++ ++ timer { ++ compatible = "arm,armv8-timer"; ++ interrupts = , ++ , ++ , ++ ; ++ arm,cpu-registers-not-fw-configured; ++ always-on; ++ }; ++ ++ gic: interrupt-controller@12200000 { ++ compatible = "arm,gic-v3"; ++ reg = <0 0x12200000 0 0x10000>, /* GICD */ ++ <0 0x12280000 0 0x80000>, /* GICR */ ++ <0 0x40440000 0 0x1000>; /* GICC */ ++ interrupts = ; ++ #interrupt-cells = <3>; ++ interrupt-controller; ++ }; ++ ++ soc0: soc@10000000 { ++ compatible = "simple-bus"; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ranges; ++ ++ sram0: sram@10000000 { ++ compatible = "mmio-sram"; ++ reg = <0x0 0x10000000 0x0 0x20000>; ++ ranges = <0x0 0x0 0x0 0x10000000 0x0 0x20000>; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ no-memory-wc; ++ ++ exported@0 { ++ reg = <0 0x0 0 0x20000>; ++ export; ++ }; ++ }; ++ ++ uphy3a: usb-phy3@12010000 { ++ compatible = "aspeed,ast2700-uphy3a"; ++ reg = <0x0 0x12010000 0x0 0xBC>; ++ clocks = <&syscon0 SCU0_CLK_GATE_PORTAUSB2CLK>; ++ resets = <&syscon0 SCU0_RESET_PORTA_PHY3>; ++ aspeed,scu = <&syscon0>; ++ #phy-cells = <0>; ++ status = "disabled"; ++ }; ++ ++ vhuba1: usb-vhub@12011000 { ++ compatible = "aspeed,ast2700-usb-vhuba1"; ++ reg = <0x0 0x12011000 0x0 0x820>; ++ interrupts = ; ++ clocks = <&syscon0 SCU0_CLK_GATE_PORTAUSB2CLK>; ++ resets = <&syscon0 SCU0_RESET_PORTA_VHUB>; ++ aspeed,vhub-downstream-ports = <7>; ++ aspeed,vhub-generic-endpoints = <21>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_usb2axhpd1_default>; ++ aspeed,device = <&pcie_cfg0>; ++ aspeed,scu = <&syscon0>; ++ status = "disabled"; ++ }; ++ ++ uphy2a: usb-phy2@12011800 { ++ compatible = "aspeed,ast2700-uphy2a"; ++ reg = <0x0 0x12011800 0x0 0x10>; ++ ctrl = <0x4 0xbc001e00>; /* xHCI to vHub1 clock rate: 60MHz */ ++ aspeed,scu = <&syscon0>; ++ status = "disabled"; ++ }; ++ ++ uphy3b: usb-phy3@12020000 { ++ compatible = "aspeed,ast2700-uphy3b"; ++ reg = <0x0 0x12020000 0x0 0xBC>; ++ clocks = <&syscon0 SCU0_CLK_GATE_PORTBUSB2CLK>; ++ resets = <&syscon0 SCU0_RESET_PORTB_PHY3>; ++ aspeed,scu = <&syscon0>; ++ #phy-cells = <0>; ++ status = "disabled"; ++ }; ++ ++ vhubb1: usb-vhub@12021000 { ++ compatible = "aspeed,ast2700-usb-vhubb1"; ++ reg = <0x0 0x12021000 0x0 0x820>; ++ interrupts = ; ++ clocks = <&syscon0 SCU0_CLK_GATE_PORTBUSB2CLK>; ++ resets = <&syscon0 SCU0_RESET_PORTB_VHUB>; ++ aspeed,vhub-downstream-ports = <7>; ++ aspeed,vhub-generic-endpoints = <21>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_usb2bxhpd1_default>; ++ aspeed,device = <&pcie_cfg1>; ++ aspeed,scu = <&syscon0>; ++ status = "disabled"; ++ }; ++ ++ uphy2b: usb-phy2@12021800 { ++ compatible = "aspeed,ast2700-uphy2b"; ++ reg = <0x0 0x12021800 0x0 0x10>; ++ ctrl = <0x4 0xbc001e00>; /* xHCI to vHub1 clock rate: 60MHz */ ++ aspeed,scu = <&syscon0>; ++ status = "disabled"; ++ }; ++ ++ xhci0: usb@12030000 { ++ compatible = "aspeed,ast2700-xhci", "snps,dwc3"; ++ reg = <0x0 0x12030000 0x0 0x10000>; ++ interrupts = ; ++ clocks = <&syscon0 
SCU0_CLK_GATE_PORTAUSB2CLK>, ++ <&syscon0 SCU0_CLK_U2PHY_REFCLK>, ++ <&syscon0 SCU0_CLK_U2PHY_CLK12M>; ++ clock-names = "bus_early", "ref", "suspend"; ++ resets = <&syscon0 SCU0_RESET_PORTA_XHCI>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_usb3axh_default &pinctrl_usb2axh_default>; ++ phys = <&uphy3a>; ++ phy-names = "usb3-phy"; ++ dr_mode = "host"; ++ status = "disabled"; ++ }; ++ ++ usb3ahp: usb3ahp { ++ compatible = "aspeed,ast2700-usb3ahp"; ++ clocks = <&syscon0 SCU0_CLK_GATE_PORTAUSB2CLK>; ++ resets = <&syscon0 SCU0_RESET_PORTA_XHCI>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_usb3axhp_default>; ++ aspeed,device = <&pcie_cfg0>; ++ phys = <&uphy3a>; ++ phy-names = "usb3-phy"; ++ status = "disabled"; ++ }; ++ ++ uhci0: usb@12040000 { ++ compatible = "aspeed,ast2700-uhci", "generic-uhci"; ++ reg = <0x0 0x12040000 0x0 0x100>; ++ interrupts = ; ++ #ports = <2>; ++ clocks = <&syscon0 SCU0_CLK_GATE_UHCICLK>; ++ resets = <&syscon0 SCU0_RESET_UHCI>; ++ status = "disabled"; ++ }; ++ ++ xhci1: usb@12050000 { ++ compatible = "aspeed,ast2700-xhci", "snps,dwc3"; ++ reg = <0x0 0x12050000 0x0 0x10000>; ++ interrupts = ; ++ clocks = <&syscon0 SCU0_CLK_GATE_PORTBUSB2CLK>, ++ <&syscon0 SCU0_CLK_U2PHY_REFCLK>, ++ <&syscon0 SCU0_CLK_U2PHY_CLK12M>; ++ clock-names = "bus_early", "ref", "suspend"; ++ resets = <&syscon0 SCU0_RESET_PORTB_XHCI>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_usb3bxh_default &pinctrl_usb2bxh_default>; ++ phys = <&uphy3b>; ++ phy-names = "usb3-phy"; ++ dr_mode = "host"; ++ status = "disabled"; ++ }; ++ ++ usb3bhp: usb3bhp { ++ compatible = "aspeed,ast2700-usb3bhp"; ++ clocks = <&syscon0 SCU0_CLK_GATE_PORTBUSB2CLK>; ++ resets = <&syscon0 SCU0_RESET_PORTB_XHCI>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_usb3bxhp_default>; ++ aspeed,device = <&pcie_cfg1>; ++ phys = <&uphy3b>; ++ phy-names = "usb3-phy"; ++ status = "disabled"; ++ }; ++ ++ vhuba0: usb-vhub@12060000 { ++ compatible = "aspeed,ast2700-usb-vhuba0"; ++ reg = <0x0 0x12060000 0x0 0x820>; ++ interrupts = ; ++ clocks = <&syscon0 SCU0_CLK_GATE_PORTAUSB2CLK>; ++ resets = <&syscon0 SCU0_RESET_PORTA_VHUB_EHCI>; ++ aspeed,vhub-downstream-ports = <7>; ++ aspeed,vhub-generic-endpoints = <21>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_usb2ad0_default>; ++ aspeed,device = <&pcie_cfg0>; ++ aspeed,scu = <&syscon0>; ++ status = "disabled"; ++ }; ++ ++ ehci0: usb@12061000 { ++ compatible = "aspeed,ast2700-ehci", "generic-ehci"; ++ reg = <0x0 0x12061000 0x0 0x100>; ++ interrupts = ; ++ clocks = <&syscon0 SCU0_CLK_GATE_PORTAUSB2CLK>; ++ resets = <&syscon0 SCU0_RESET_PORTA_VHUB_EHCI>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_usb2ah_default>; ++ status = "disabled"; ++ }; ++ ++ usb2ahp: usb2ahp { ++ compatible = "aspeed,ast2700-usb2ahp"; ++ reg = <0x0 0x12061800 0x0 0x90>; ++ clocks = <&syscon0 SCU0_CLK_GATE_PORTAUSB2CLK>; ++ resets = <&syscon0 SCU0_RESET_PORTA_VHUB_EHCI>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_usb2ahp_default>; ++ aspeed,device = <&pcie_cfg0>; ++ status = "disabled"; ++ }; ++ ++ vhubb0: usb-vhub@12062000 { ++ compatible = "aspeed,ast2700-usb-vhubb0"; ++ reg = <0x0 0x12062000 0x0 0x820>; ++ interrupts = ; ++ clocks = <&syscon0 SCU0_CLK_GATE_PORTBUSB2CLK>; ++ resets = <&syscon0 SCU0_RESET_PORTB_VHUB_EHCI>; ++ aspeed,vhub-downstream-ports = <7>; ++ aspeed,vhub-generic-endpoints = <21>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_usb2bd0_default>; ++ aspeed,device = <&pcie_cfg1>; ++ aspeed,scu = <&syscon0>; ++ status = "disabled"; 
++ }; ++ ++ ehci1: usb@12063000 { ++ compatible = "aspeed,ast2700-ehci", "generic-ehci"; ++ reg = <0x0 0x12063000 0x0 0x100>; ++ interrupts = ; ++ clocks = <&syscon0 SCU0_CLK_GATE_PORTBUSB2CLK>; ++ resets = <&syscon0 SCU0_RESET_PORTB_VHUB_EHCI>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_usb2bh_default>; ++ status = "disabled"; ++ }; ++ ++ usb2bhp: usb2bhp { ++ compatible = "aspeed,ast2700-usb2bhp"; ++ reg = <0x0 0x12063800 0x0 0x90>; ++ clocks = <&syscon0 SCU0_CLK_GATE_PORTBUSB2CLK>; ++ resets = <&syscon0 SCU0_RESET_PORTB_VHUB_EHCI>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_usb2bhp_default>; ++ aspeed,device = <&pcie_cfg1>; ++ status = "disabled"; ++ }; ++ ++ hace: crypto@12070000 { ++ compatible = "aspeed,ast2700-hace"; ++ reg = <0x0 0x12070000 0x0 0x200>; ++ interrupts = ; ++ clocks = <&syscon0 SCU0_CLK_GATE_HACCLK>; ++ resets = <&syscon0 SCU0_RESET_HACE>; ++ status = "disabled"; ++ }; ++ ++ rsss: crypto@12080000 { ++ compatible = "aspeed,ast2700-rsss"; ++ reg = <0x0 0x12080000 0 0x1000>; ++ clocks = <&syscon0 SCU0_CLK_GATE_RSACLK>; ++ interrupts = ; ++ resets = <&syscon0 SCU0_RESET_RSA>, ++ <&syscon0 SCU0_RESET_SHA3>, ++ <&syscon0 SCU0_RESET_SM3>, ++ <&syscon0 SCU0_RESET_SM4>; ++ reset-names = "rsa", "sha3", "sm3", "sm4"; ++ status = "disabled"; ++ }; ++ ++ emmc_controller: sdc@12090000 { ++ compatible = "aspeed,ast2600-sd-controller"; ++ reg = <0 0x12090000 0 0x100>; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ranges = <0 0 0 0x12090000 0 0x10000>; ++ clocks = <&syscon0 SCU0_CLK_GATE_EMMCCLK>; ++ resets = <&syscon0 SCU0_RESET_EMMC>; ++ status = "disabled"; ++ ++ emmc: sdhci@12090100 { ++ compatible = "aspeed,ast2600-emmc"; ++ reg = <0 0x100 0 0x100>; ++ sdhci,auto-cmd12; ++ interrupts = ; ++ clocks = <&syscon0 SCU0_CLK_GATE_EMMCCLK>; ++ pinctrl-names = "default"; ++ status = "disabled"; ++ }; ++ }; ++ ++ video0: video@120a0000 { ++ compatible = "aspeed,ast2700-video-engine", "syscon"; ++ reg = <0x0 0x120a0000 0x0 0x400>, <0x0 0x14c3a800 0x0 0x400>; ++ clocks = <&syscon0 SCU0_CLK_GATE_VCLK>, ++ <&syscon0 SCU0_CLK_GATE_ECLK>, ++ <&syscon0 SCU0_CLK_GATE_CRT1CLK>; ++ clock-names = "vclk", "eclk", "crt2clk"; ++ interrupts-extended = <&gic GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>, <&intc1_5 22>; ++ resets = <&syscon0 SCU0_RESET_VIDEO>; ++ aspeed,scu = <&syscon0>; ++ status = "disabled"; ++ }; ++ ++ video1: video@120a1000 { ++ compatible = "aspeed,ast2700-video-engine"; ++ reg = <0x0 0x120a1000 0x0 0x400>; ++ clocks = <&syscon0 SCU0_CLK_GATE_VCLK>, ++ <&syscon0 SCU0_CLK_GATE_ECLK>, ++ <&syscon0 SCU0_CLK_GATE_CRT1CLK>; ++ clock-names = "vclk", "eclk", "crt2clk"; ++ interrupts = ; ++ resets = <&syscon0 SCU0_RESET_VIDEO>; ++ aspeed,scu = <&syscon0>; ++ status = "disabled"; ++ }; ++ ++ rvas0: rvas@120b8000 { ++ compatible = "aspeed,ast2700-rvas"; ++ reg = <0x0 0x120b8000 0x0 0x800>, ++ <0x0 0x12c14000 0x0 0x800>, ++ <0x0 0x120a0000 0x0 0x1000>; ++ clocks = <&syscon0 SCU0_CLK_GATE_RVAS0CLK>, ++ <&syscon0 SCU0_CLK_GATE_VCLK>, ++ <&syscon0 SCU0_CLK_GATE_ECLK>; ++ clock-names = "rvasclk", "vclk", "eclk"; ++ interrupts = , , ; ++ resets = <&syscon0 SCU0_RESET_RVAS0>, <&syscon0 SCU0_RESET_VIDEO>; ++ status = "disabled"; ++ }; ++ ++ rvas1: rvas@120bc000 { ++ compatible = "aspeed,ast2700-rvas"; ++ reg = <0x0 0x120bc000 0x0 0x800>, ++ <0x0 0x12c14800 0x0 0x800>, ++ <0x0 0x120a1000 0x0 0x1000>; ++ clocks = <&syscon0 SCU0_CLK_GATE_RVAS1CLK>, ++ <&syscon0 SCU0_CLK_GATE_VCLK>, ++ <&syscon0 SCU0_CLK_GATE_ECLK>; ++ clock-names = "rvas2clk", "vclk", "eclk"; ++ interrupts = , , ; ++ resets 
= <&syscon0 SCU0_RESET_RVAS1>, <&syscon0 SCU0_RESET_VIDEO>; ++ status = "disabled"; ++ }; ++ ++ pcie0: pcie@120e0000 { ++ compatible = "aspeed,ast2700-pcie"; ++ device_type = "pci"; ++ reg = <0x0 0x120e0000 0x0 0x100>; ++ linux,pci-domain = <0>; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ bus-range = <0x00 0xff>; ++ ranges = <0x01000000 0x0 0x00000000 0x0 0x00000000 0x0 0x00008000>, /* I/O */ ++ <0x02000000 0x0 0x60000000 0x0 0x60000000 0x0 0x20000000>; /* memory */ ++ interrupts = ; ++ resets = <&syscon0 SCU0_RESET_H2X0>, ++ <&syscon0 SCU0_RESET_PCIE0RST>; ++ reset-names = "h2x", "perst"; ++ status = "disabled"; ++ ++ clocks = <&syscon0 SCU0_CLK_GATE_BCLK>; ++ pinctrl-0 = <&pinctrl_pcierc0_perst_default>; ++ pinctrl-names = "default"; ++ ++ #interrupt-cells = <1>; ++ msi-controller; ++ ++ aspeed,pciephy = <&pcie_phy0>; ++ aspeed,pciecfg = <&pcie_cfg0>; ++ ++ interrupt-map-mask = <0 0 0 7>; ++ interrupt-map = <0 0 0 1 &pcie_intc0 0>, ++ <0 0 0 2 &pcie_intc0 1>, ++ <0 0 0 3 &pcie_intc0 2>, ++ <0 0 0 4 &pcie_intc0 3>; ++ pcie_intc0: interrupt-controller { ++ interrupt-controller; ++ #address-cells = <0>; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ pcie1: pcie@120f0000 { ++ compatible = "aspeed,ast2700-pcie"; ++ device_type = "pci"; ++ reg = <0x0 0x120f0000 0x0 0x100>; ++ linux,pci-domain = <1>; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ bus-range = <0x00 0xff>; ++ ranges = <0x01000000 0 0x00000000 0x0 0x00000000 0x0 0x00008000>, ++ <0x02000000 0 0x80000000 0x0 0x80000000 0x0 0x20000000>; /* memory */ ++ interrupts = ; ++ resets = <&syscon0 SCU0_RESET_H2X1>, ++ <&syscon0 SCU0_RESET_PCIE1RST>; ++ reset-names = "h2x", "perst"; ++ status = "disabled"; ++ ++ clocks = <&syscon0 SCU0_CLK_GATE_BCLK>; ++ pinctrl-0 = <&pinctrl_pcierc1_perst_default>; ++ pinctrl-names = "default"; ++ ++ #interrupt-cells = <1>; ++ msi-controller; ++ ++ aspeed,pciephy = <&pcie_phy1>; ++ aspeed,pciecfg = <&pcie_cfg1>; ++ ++ interrupt-map-mask = <0 0 0 7>; ++ interrupt-map = <0 0 0 1 &pcie_intc1 0>, ++ <0 0 0 2 &pcie_intc1 1>, ++ <0 0 0 3 &pcie_intc1 2>, ++ <0 0 0 4 &pcie_intc1 3>; ++ pcie_intc1: interrupt-controller { ++ interrupt-controller; ++ #address-cells = <0>; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ xdma0: xdma@12c04000 { ++ compatible = "aspeed,ast2700-xdma0"; ++ reg = <0x0 0x12c04000 0x0 0x100>; ++ clocks = <&syscon0 SCU0_CLK_GATE_BCLK>; ++ resets = <&syscon0 SCU0_RESET_XDMA0>; ++ reset-names = "device"; ++ interrupts-extended = <&gic GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>, ++ <&scu_ic0 ASPEED_AST2700_SCU_IC0_PCIE_PERST_LO_TO_HI>; ++ aspeed,pcie-device = "bmc"; ++ aspeed,scu = <&syscon0>; ++ status = "disabled"; ++ }; ++ ++ xdma1: xdma@12c05000 { ++ compatible = "aspeed,ast2700-xdma1"; ++ reg = <0x0 0x12c05000 0x0 0x100>; ++ clocks = <&syscon0 SCU0_CLK_GATE_BCLK>; ++ resets = <&syscon0 SCU0_RESET_XDMA1>; ++ reset-names = "device"; ++ interrupts-extended = <&gic GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>, ++ <&scu_ic1 ASPEED_AST2700_SCU_IC1_PCIE_RCRST_LO_TO_HI>; ++ aspeed,pcie-device = "bmc"; ++ aspeed,scu = <&syscon0>; ++ status = "disabled"; ++ }; ++ ++ intc0_11: interrupt-controller@12101b00 { ++ compatible = "aspeed,ast2700-intc-ic"; ++ reg = <0x0 0x12101b00 0x0 0x10>; ++ #interrupt-cells = <1>; ++ interrupt-controller; ++ interrupts = , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ ; ++ }; ++ ++ bmc_dev0: bmc-dev@12110000 { ++ compatible = "aspeed,ast2700-bmc-device"; ++ reg = <0x0 0x12110000 0x0 0xb000>; ++ interrupts-extended = <&gic GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>, ++ <&scu_ic0 
ASPEED_AST2700_SCU_IC0_PCIE_PERST_LO_TO_HI>; ++ aspeed,device = <&pcie_cfg0>; ++ aspeed,e2m = <&e2m_config0>; ++ aspeed,scu = <&syscon0>; ++ pcie2lpc; ++ status = "disabled"; ++ }; ++ ++ bmc_dev1: bmc-dev@12120000 { ++ compatible = "aspeed,ast2700-bmc-device"; ++ reg = <0x0 0x12120000 0x0 0xb000>; ++ interrupts-extended = <&gic GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>, ++ <&scu_ic1 ASPEED_AST2700_SCU_IC1_PCIE_RCRST_LO_TO_HI>; ++ aspeed,device = <&pcie_cfg1>; ++ aspeed,e2m = <&e2m_config1>; ++ aspeed,scu = <&syscon0>; ++ pcie2lpc; ++ status = "disabled"; ++ }; ++ ++ edac: sdram@12c00000 { ++ compatible = "aspeed,ast2700-sdram-edac", "syscon"; ++ reg = <0 0x12c00000 0 0x1000>; ++ interrupts = ; ++ }; ++ ++ syscon0: syscon@12c02000 { ++ compatible = "aspeed,ast2700-scu0", "syscon", "simple-mfd"; ++ reg = <0x0 0x12c02000 0x0 0x1000>; ++ ranges = <0x0 0x0 0x12c02000 0x1000>; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ #clock-cells = <1>; ++ #reset-cells = <1>; ++ ++ silicon-id@0 { ++ compatible = "aspeed,ast2700-silicon-id", "aspeed,silicon-id"; ++ reg = <0x0 0x4>; ++ }; ++ ++ scu_ic0: interrupt-controller@1D0 { ++ #interrupt-cells = <1>; ++ compatible = "aspeed,ast2700-scu-ic0"; ++ reg = <0x1d0 0xc>; ++ interrupts = ; ++ interrupt-controller; ++ }; ++ ++ scu_ic1: interrupt-controller@1E0 { ++ #interrupt-cells = <1>; ++ compatible = "aspeed,ast2700-scu-ic1"; ++ reg = <0x1e0 0xc>; ++ interrupts = ; ++ interrupt-controller; ++ }; ++ ++ pinctrl0: pinctrl@400 { ++ compatible = "aspeed,ast2700-soc0-pinctrl"; ++ reg = <0x400 0x600>; ++ }; ++ }; ++ ++ pcie_cfg0: syscon@12c02a00 { ++ compatible = "syscon"; ++ reg = <0 0x12c02a00 0 0x80>; ++ }; ++ ++ pcie_cfg1: syscon@12c02a80 { ++ compatible = "syscon"; ++ reg = <0 0x12c02a80 0 0x80>; ++ }; ++ ++ mctp0: mctp0@12c06000 { ++ compatible = "aspeed,ast2700-mctp0"; ++ reg = <0x0 0x12c06000 0x0 0x40>; ++ interrupts-extended = <&gic GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>, <&scu_ic0 ASPEED_AST2700_SCU_IC0_PCIE_PERST_LO_TO_HI>; ++ interrupt-names = "mctp", "pcie"; ++ resets = <&syscon0 SCU0_RESET_MCTP0>; ++ aspeed,scu = <&syscon0>; ++ aspeed,pcieh = <&pcie_phy0>; ++ status = "disabled"; ++ }; ++ ++ mctp1: mctp1@12c07000 { ++ compatible = "aspeed,ast2700-mctp1"; ++ reg = <0x0 0x12c07000 0x0 0x40>; ++ interrupts-extended = <&gic GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>, <&scu_ic1 ASPEED_AST2700_SCU_IC1_PCIE_RCRST_LO_TO_HI>; ++ interrupt-names = "mctp", "pcie"; ++ resets = <&syscon0 SCU0_RESET_MCTP1>; ++ aspeed,scu = <&syscon0>; ++ aspeed,pcieh = <&pcie_phy1>; ++ status = "disabled"; ++ }; ++ ++ ufs_controller: cnr@12c08000 { ++ compatible = "aspeed,ast2700-ufscnr"; ++ reg = <0 0x12c08000 0 0x100>; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ranges = <0 0 0 0x12c08000 0 0x300>; ++ clocks = <&syscon0 SCU0_CLK_GATE_UFSCLK>; ++ resets = <&syscon0 SCU0_RESET_UFS>; ++ status = "disabled"; ++ ++ ufs: ufshc@12c08200 { ++ compatible = "aspeed,ast2700-ufshc"; ++ reg = <0 0x200 0 0x100>; ++ interrupts = ; ++ clocks = <&syscon0 SCU0_CLK_AXI1>; ++ clock-names = "core_clk"; ++ freq-table-hz = <0 0>; ++ status = "disabled"; ++ }; ++ }; ++ ++ display_port: dp@12c0a000 { ++ compatible = "aspeed,ast2700-displayport", "syscon"; ++ reg = <0x0 0x12c0a000 0x0 0x200>; ++ status = "disabled"; ++ }; ++ ++ display_port_mcu: dpmcu@11000000 { ++ compatible = "aspeed,ast2700-displayport-mcu", "syscon"; ++ reg = <0x0 0x11000000 0x0 0xf00>; ++ status = "disabled"; ++ }; ++ ++ gfx: display@12c09000 { ++ compatible = "aspeed,ast2700-gfx", "syscon"; ++ reg = <0 0x12c09000 0 0x100>; ++ reg-io-width = <4>; ++ 
clocks = <&syscon0 SCU0_CLK_GATE_CRT0CLK>; ++ resets = <&syscon0 SCU0_RESET_CRT0>; ++ syscon = <&syscon0>; ++ status = "disabled"; ++ interrupts-extended = <&gic GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>, ++ <&scu_ic0 ASPEED_AST2700_SCU_IC0_PCIE_PERST_LO_TO_HI>, ++ <&scu_ic0 ASPEED_AST2700_SCU_IC0_PCIE_PERST_HI_TO_LO>; ++ }; ++ ++ disp_intf: disp-intf { ++ compatible = "aspeed,ast2700-disp-intf", "syscon"; ++ reg = <0x0 0x12c1d000 0x0 0x40>; ++ syscon = <&syscon0>; ++ status = "disabled"; ++ }; ++ ++ rtc: rtc@12c0f000 { ++ compatible = "aspeed,ast2700-rtc"; ++ reg = <0 0x12c0f000 0 0x18>; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ ++ gpio0: gpio@12c11000 { ++ #gpio-cells = <2>; ++ gpio-controller; ++ compatible = "aspeed,ast2700-gpio"; ++ reg = <0x0 0x12c11000 0x0 0x1000>; ++ interrupts = ; ++ gpio-ranges = <&pinctrl0 0 0 12>; ++ ngpios = <12>; ++ clocks = <&syscon0 SCU0_CLK_APB>; ++ interrupt-controller; ++ #interrupt-cells = <2>; ++ }; ++ ++ pcie_phy0: phy@12c15000 { ++ compatible = "aspeed,ast2700-pcie-phy", "syscon"; ++ reg = <0x0 0x12c15000 0x0 0x800>; ++ }; ++ ++ pcie_phy1: phy@12c15800 { ++ compatible = "aspeed,ast2700-pcie-phy", "syscon"; ++ reg = <0x0 0x12c15800 0x0 0x800>; ++ }; ++ ++ pcie_vuart0: serial@12c18000 { ++ compatible = "aspeed,ast2700-uart"; ++ reg = <0x0 0x12c18000 0x0 0x40>; ++ interrupts = ; ++ clocks = <&syscon0 SCU0_CLK_APB>; ++ virtual; ++ status = "disabled"; ++ }; ++ ++ pcie_vuart1: serial@12c18100 { ++ compatible = "aspeed,ast2700-uart"; ++ reg = <0x0 0x12c18100 0x0 0x40>; ++ interrupts = ; ++ clocks = <&syscon0 SCU0_CLK_APB>; ++ virtual; ++ status = "disabled"; ++ }; ++ ++ pcie_vuart2: serial@12c18200 { ++ compatible = "aspeed,ast2700-uart"; ++ reg = <0x0 0x12c18200 0x0 0x40>; ++ interrupts = ; ++ clocks = <&syscon0 SCU0_CLK_APB>; ++ virtual; ++ status = "disabled"; ++ }; ++ ++ pcie_vuart3: serial@12c18300 { ++ compatible = "aspeed,ast2700-uart"; ++ reg = <0x0 0x12c18300 0x0 0x40>; ++ interrupts = ; ++ clocks = <&syscon0 SCU0_CLK_APB>; ++ virtual; ++ status = "disabled"; ++ }; ++ ++ pcie_lpc0: pcie-lpc@12c19000 { ++ compatible = "aspeed,ast2700-lpc", "simple-mfd", "syscon"; ++ reg = <0x0 0x12c19000 0x0 0x800>; ++ ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ranges = <0x0 0x0 0x12c19000 0x800>; ++ ++ pcie_lpc0_kcs0: pcie-kcs@24 { ++ compatible = "aspeed,ast2600-kcs-bmc"; ++ reg = <0x24 0x1>, <0x30 0x1>, <0x3c 0x1>; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ ++ pcie_lpc0_kcs1: pcie-kcs@28 { ++ compatible = "aspeed,ast2600-kcs-bmc"; ++ reg = <0x28 0x1>, <0x34 0x1>, <0x40 0x1>; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ ++ pcie_lpc0_kcs2: pcie-kcs@2c { ++ compatible = "aspeed,ast2600-kcs-bmc"; ++ reg = <0x2c 0x1>, <0x38 0x1>, <0x44 0x1>; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ ++ pcie_lpc0_kcs3: pcie-kcs@114 { ++ compatible = "aspeed,ast2600-kcs-bmc"; ++ reg = <0x114 0x1>, <0x118 0x1>, <0x11c 0x1>; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ ++ pcie_lpc0_snoop: pcie-snoop@80 { ++ compatible = "aspeed,ast2600-lpc-snoop"; ++ reg = <0x80 0x80>; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ ++ pcie_lpc0_pcc: pcie-pcc@0 { ++ compatible = "aspeed,ast2600-lpc-pcc"; ++ reg = <0x0 0x140>; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ ++ pcie_lpc0_ibt: pcie-ibt@140 { ++ compatible = "aspeed,ast2600-ibt-bmc"; ++ reg = <0x140 0x18>; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ ++ }; ++ ++ pcie_lpc1: pcie-lpc@12c19800 { ++ compatible = "aspeed,ast2700-lpc", "simple-mfd", "syscon"; ++ reg = <0x0 0x12c19800 0x0 0x800>; ++ ++ 
#address-cells = <1>; ++ #size-cells = <1>; ++ ranges = <0x0 0x0 0x12c19800 0x800>; ++ ++ pcie_lpc1_kcs0: pcie-kcs@24 { ++ compatible = "aspeed,ast2600-kcs-bmc"; ++ reg = <0x24 0x1>, <0x30 0x1>, <0x3c 0x1>; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ ++ pcie_lpc1_kcs1: pcie-kcs@28 { ++ compatible = "aspeed,ast2600-kcs-bmc"; ++ reg = <0x28 0x1>, <0x34 0x1>, <0x40 0x1>; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ ++ pcie_lpc1_kcs2: pcie-kcs@2c { ++ compatible = "aspeed,ast2600-kcs-bmc"; ++ reg = <0x2c 0x1>, <0x38 0x1>, <0x44 0x1>; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ ++ pcie_lpc1_kcs3: pcie-kcs@114 { ++ compatible = "aspeed,ast2600-kcs-bmc"; ++ reg = <0x114 0x1>, <0x118 0x1>, <0x11c 0x1>; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ ++ pcie_lpc1_snoop: pcie-snoop@80 { ++ compatible = "aspeed,ast2600-lpc-snoop"; ++ reg = <0x80 0x80>; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ ++ pcie_lpc1_pcc: pcie-pcc@0 { ++ compatible = "aspeed,ast2600-lpc-pcc"; ++ reg = <0x0 0x140>; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ ++ pcie_lpc1_ibt: pcie-ibt@140 { ++ compatible = "aspeed,ast2600-ibt-bmc"; ++ reg = <0x140 0x18>; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ ++ }; ++ ++ uart4: serial@12c1a000 { ++ compatible = "aspeed,ast2700-uart"; ++ reg = <0x0 0x12c1a000 0x0 0x1000>; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ clocks = <&syscon0 SCU0_CLK_GATE_UART4CLK>; ++ interrupts = ; ++ no-loopback-test; ++ status = "disabled"; ++ }; ++ ++ mbox0: mbox@12c1c200 { ++ compatible = "aspeed,ast2700-mailbox"; ++ reg = <0x0 0x12c1c200 0x0 0x100>, <0x0 0x12c1c300 0x0 0x100>; ++ reg-names = "tx", "rx"; ++ interrupts = ; ++ #mbox-cells = <1>; ++ mbox-name = "ssp"; ++ }; ++ ++ mbox1: mbox@12c1c600 { ++ compatible = "aspeed,ast2700-mailbox"; ++ reg = <0x0 0x12c1c600 0x0 0x100>, <0x0 0x12c1c700 0x0 0x100>; ++ reg-names = "tx", "rx"; ++ interrupts = ; ++ #mbox-cells = <1>; ++ mbox-name = "tsp"; ++ }; ++ ++ ecdsa: crypto@12c1e000 { ++ compatible = "aspeed,ast2700-ecdsa"; ++ reg = <0x0 0x12c1e000 0x0 0x1000>; ++ interrupts = ; ++ clocks = <&syscon0 SCU0_CLK_GATE_ECDSACLK>; ++ resets = <&syscon0 SCU0_RESET_ECC>; ++ status = "disabled"; ++ }; ++ ++ jtag0: jtag@12c20000 { ++ compatible = "aspeed,ast2700-jtag"; ++ reg= <0x0 0x12c20000 0x0 0x40>; ++ interrupts = ; ++ clocks = <&syscon0 SCU0_CLK_AHB>; ++ resets = <&syscon0 SCU0_RESET_JTAG>; ++ pinctrl-0 = <&pinctrl_jtagm0_default>; ++ pinctrl-names = "default"; ++ status = "disabled"; ++ }; ++ ++ e2m_config0: e2m-config@12c21000 { ++ compatible = "syscon", "simple-mfd"; ++ reg = <0 0x12c21000 0 0x300>; ++ ranges = <0 0 0 0x12c21000 0 0x300>; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ aspeed,device = <&pcie_cfg0>; ++ ++ e2m_ic0: interrupt-controller@14 { ++ #interrupt-cells = <1>; ++ compatible = "aspeed,ast2700-e2m-ic"; ++ reg = <0 0x14 0 0x8>; ++ interrupts-extended = <&gic GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>; ++ interrupt-names = "e2m"; ++ interrupt-controller; ++ }; ++ ++ pcie0_mmbi0: pcie0-mmbi@0 { ++ compatible = "aspeed,ast2700-pcie-mmbi"; ++ interrupts-extended = <&e2m_ic0 ASPEED_AST2700_E2M_MMBI_H2B_INT0>; ++ index = <0>; ++ pid = <3>; ++ bar = <0x3c>; ++ msi = <28>; ++ status = "disabled"; ++ }; ++ ++ pcie0_mmbi1: pcie0-mmbi@1 { ++ compatible = "aspeed,ast2700-pcie-mmbi"; ++ interrupts-extended = <&e2m_ic0 ASPEED_AST2700_E2M_MMBI_H2B_INT1>; ++ index = <1>; ++ pid = <4>; ++ bar = <0x4c>; ++ msi = <29>; ++ status = "disabled"; ++ }; ++ ++ pcie0_mmbi2: pcie0-mmbi@2 { ++ compatible = "aspeed,ast2700-pcie-mmbi"; ++ 
interrupts-extended = <&e2m_ic0 ASPEED_AST2700_E2M_MMBI_H2B_INT2>; ++ index = <2>; ++ pid = <5>; ++ bar = <0x5c>; ++ msi = <30>; ++ status = "disabled"; ++ }; ++ ++ pcie0_mmbi3: pcie0-mmbi@3 { ++ compatible = "aspeed,ast2700-pcie-mmbi"; ++ interrupts-extended = <&e2m_ic0 ASPEED_AST2700_E2M_MMBI_H2B_INT3>; ++ index = <3>; ++ pid = <6>; ++ bar = <0x6c>; ++ msi = <31>; ++ status = "disabled"; ++ }; ++ }; ++ ++ e2m_config1: e2m-config@12c22000 { ++ compatible = "syscon", "simple-mfd"; ++ reg = <0 0x12c22000 0 0x300>; ++ ranges = <0x0 0x0 0 0x12c22000 0 0x300>; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ aspeed,device = <&pcie_cfg1>; ++ ++ e2m_ic1: interrupt-controller@14 { ++ #interrupt-cells = <1>; ++ compatible = "aspeed,ast2700-e2m-ic"; ++ reg = <0 0x14 0 0x8>; ++ interrupts-extended = <&gic GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>; ++ interrupt-controller; ++ }; ++ ++ pcie1_mmbi4: pcie1-mmbi@4 { ++ compatible = "aspeed,ast2700-pcie-mmbi"; ++ interrupts-extended = <&e2m_ic1 ASPEED_AST2700_E2M_MMBI_H2B_INT4>; ++ index = <4>; ++ pid = <11>; ++ bar = <0x3c>; ++ msi = <28>; ++ status = "disabled"; ++ }; ++ ++ pcie1_mmbi5: pcie1-mmbi@5 { ++ compatible = "aspeed,ast2700-pcie-mmbi"; ++ interrupts-extended = <&e2m_ic1 ASPEED_AST2700_E2M_MMBI_H2B_INT5>; ++ index = <5>; ++ pid = <12>; ++ bar = <0x4c>; ++ msi = <29>; ++ status = "disabled"; ++ }; ++ ++ pcie1_mmbi6: pcie1-mmbi@6 { ++ compatible = "aspeed,ast2700-pcie-mmbi"; ++ interrupts-extended = <&e2m_ic1 ASPEED_AST2700_E2M_MMBI_H2B_INT6>; ++ index = <6>; ++ pid = <13>; ++ bar = <0x5c>; ++ msi = <30>; ++ status = "disabled"; ++ }; ++ ++ pcie1_mmbi7: pcie1-mmbi@7 { ++ compatible = "aspeed,ast2700-pcie-mmbi"; ++ interrupts-extended = <&e2m_ic1 ASPEED_AST2700_E2M_MMBI_H2B_INT7>; ++ index = <7>; ++ pid = <14>; ++ bar = <0x6c>; ++ msi = <31>; ++ status = "disabled"; ++ }; ++ }; ++ }; ++ ++ soc1: soc@14000000 { ++ compatible = "simple-bus"; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ranges; ++ ++ fmc: spi@14000000 { ++ reg = <0x0 0x14000000 0x0 0xc4>, <0x1 0x00000000 0x0 0x80000000>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ compatible = "aspeed,ast2700-fmc"; ++ status = "disabled"; ++ clocks = <&syscon1 SCU1_CLK_AHB>; ++ interrupts-extended = <&intc1_3 25>; ++ num-cs = <3>; ++ ++ flash@0 { ++ reg = < 0 >; ++ compatible = "jedec,spi-nor"; ++ spi-max-frequency = <50000000>; ++ spi-rx-bus-width = <2>; ++ status = "disabled"; ++ }; ++ ++ flash@1 { ++ reg = < 1 >; ++ compatible = "jedec,spi-nor"; ++ spi-max-frequency = <50000000>; ++ spi-rx-bus-width = <2>; ++ status = "disabled"; ++ }; ++ ++ flash@2 { ++ reg = < 2 >; ++ compatible = "jedec,spi-nor"; ++ spi-max-frequency = <50000000>; ++ spi-rx-bus-width = <2>; ++ status = "disabled"; ++ }; ++ }; ++ ++ spi0: spi@14010000 { ++ reg = <0x0 0x14010000 0x0 0xc4>, <0x1 0x80000000 0x0 0x80000000>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ compatible = "aspeed,ast2700-spi"; ++ clocks = <&syscon1 SCU1_CLK_AHB>; ++ interrupts-extended = <&intc1_3 26>; ++ status = "disabled"; ++ num-cs = <2>; ++ ++ flash@0 { ++ reg = < 0 >; ++ compatible = "jedec,spi-nor"; ++ spi-max-frequency = <50000000>; ++ spi-rx-bus-width = <2>; ++ status = "disabled"; ++ }; ++ ++ flash@1 { ++ reg = < 1 >; ++ compatible = "jedec,spi-nor"; ++ spi-max-frequency = <50000000>; ++ spi-rx-bus-width = <2>; ++ status = "disabled"; ++ }; ++ }; ++ ++ spi1: spi@14020000 { ++ reg = <0x0 0x14020000 0x0 0xc4>, <0x2 0x00000000 0x0 0x80000000>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ compatible = "aspeed,ast2700-spi"; ++ clocks = 
<&syscon1 SCU1_CLK_AHB>; ++ interrupts-extended = <&intc1_3 27>; ++ status = "disabled"; ++ num-cs = <2>; ++ ++ flash@0 { ++ reg = < 0 >; ++ compatible = "jedec,spi-nor"; ++ spi-max-frequency = <50000000>; ++ spi-rx-bus-width = <2>; ++ status = "disabled"; ++ }; ++ ++ flash@1 { ++ reg = < 1 >; ++ compatible = "jedec,spi-nor"; ++ spi-max-frequency = <50000000>; ++ spi-rx-bus-width = <2>; ++ status = "disabled"; ++ }; ++ }; ++ ++ spi2: spi@14030000 { ++ reg = <0x0 0x14030000 0x0 0x1f0>, <0x2 0x80000000 0x0 0x80000000>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ clocks = <&syscon1 SCU1_CLK_AHB>; ++ resets = <&syscon1 SCU1_RESET_SPI2>; ++ interrupts-extended = <&intc1_3 28>; ++ num-cs = <2>; ++ status = "disabled"; ++ }; ++ ++ can0: can@14c3e000 { ++ reg = <0x0 0x14c3e000 0x0 0x2000>; ++ compatible = "aspeed,canfd"; ++ status = "disabled"; ++ clocks = <&syscon1 SCU1_CLK_GATE_CANCLK>; ++ resets = <&syscon1 SCU1_RESET_CAN>; ++ interrupts-extended = <&intc1_3 31>; ++ pinctrl-0 = <&pinctrl_can_default>; ++ pinctrl-names = "default"; ++ }; ++ ++ mdio0: mdio@14040000 { ++ compatible = "aspeed,ast2700-mdio", "aspeed,ast2600-mdio"; ++ reg = <0 0x14040000 0 0x8>; ++ resets = <&syscon1 SCU1_RESET_MII>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_mdio0_default>; ++ status = "disabled"; ++ }; ++ ++ mdio1: mdio@14040008 { ++ compatible = "aspeed,ast2700-mdio", "aspeed,ast2600-mdio"; ++ reg = <0 0x14040008 0 0x8>; ++ resets = <&syscon1 SCU1_RESET_MII>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_mdio1_default>; ++ status = "disabled"; ++ }; ++ ++ mdio2: mdio@14040010 { ++ compatible = "aspeed,ast2700-mdio", "aspeed,ast2600-mdio"; ++ reg = <0 0x14040010 0 0x8>; ++ resets = <&syscon1 SCU1_RESET_MII>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_mdio2_default>; ++ status = "disabled"; ++ }; ++ ++ sgmii: phy@14C01000 { ++ compatible = "aspeed,ast2700-sgmii"; ++ reg = <0x0 0x14c01000 0x0 0x40>; ++ ++ aspeed,plda = <&pcie_phy2>; ++ ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_sgmii_default>; ++ #phy-cells = <0>; ++ status = "disabled"; ++ }; ++ ++ mac0: ethernet@14050000 { ++ compatible = "aspeed,ast2700-mac", "faraday,ftgmac100"; ++ reg = <0x0 0x14050000 0x0 0x200>; ++ interrupts-extended = <&intc1_4 0>; ++ ++ clocks = <&syscon1 SCU1_CLK_GATE_MAC0CLK>; ++ resets = <&syscon1 SCU1_RESET_MAC0>; ++ status = "disabled"; ++ }; ++ ++ mac1: ethernet@14060000 { ++ compatible = "aspeed,ast2700-mac", "faraday,ftgmac100"; ++ reg = <0x0 0x14060000 0x0 0x200>; ++ interrupts-extended = <&intc1_4 1>; ++ ++ clocks = <&syscon1 SCU1_CLK_GATE_MAC1CLK>; ++ resets = <&syscon1 SCU1_RESET_MAC1>; ++ status = "disabled"; ++ }; ++ ++ mac2: ethernet@14070000 { ++ compatible = "aspeed,ast2700-mac", "faraday,ftgmac100"; ++ reg = <0x0 0x14070000 0x0 0x200>; ++ interrupts-extended = <&intc1_4 2>; ++ ++ phys = <&sgmii>; ++ phy-names = "sgmii"; ++ ++ clocks = <&syscon1 SCU1_CLK_GATE_MAC2CLK>; ++ resets = <&syscon1 SCU1_RESET_MAC2>; ++ status = "disabled"; ++ }; ++ ++ sdio_controller: sdc@14080000 { ++ compatible = "aspeed,ast2600-sd-controller"; ++ reg = <0 0x14080000 0 0x100>; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ranges = <0 0 0 0x14080000 0 0x10000>; ++ clocks = <&syscon1 SCU1_CLK_GATE_SDCLK>; ++ resets = <&syscon1 SCU1_RESET_SD>; ++ status = "disabled"; ++ ++ sdhci: sdhci@14080100 { ++ compatible = "aspeed,ast2600-sdhci", "sdhci"; ++ reg = <0 0x100 0 0x100>; ++ sdhci,auto-cmd12; ++ interrupts-extended = <&intc1_5 1>; ++ clocks = <&syscon1 SCU1_CLK_GATE_SDCLK>; ++ pinctrl-names = 
"default"; ++ pinctrl-0 = <&pinctrl_sd_default>; ++ status = "disabled"; ++ }; ++ }; ++ ++ pwm_tach: pwm-tach-controller@140c0000 { ++ compatible = "aspeed,ast2700-pwm-tach"; ++ reg = <0x0 0x140c0000 0 0x100>; ++ clocks = <&syscon1 SCU1_CLK_AHB>; ++ resets = <&syscon1 SCU1_RESET_PWM>; ++ #pwm-cells = <3>; ++ status = "disabled"; ++ }; ++ ++ pcie2: pcie@140d0000 { ++ compatible = "aspeed,ast2700-pcie"; ++ device_type = "pci"; ++ reg = <0x0 0x140d0000 0x0 0x100>; ++ linux,pci-domain = <2>; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ bus-range = <0x00 0xff>; ++ ranges = <0x01000000 0 0x00000000 0x0 0x00000000 0x0 0x00008000>, ++ <0x02000000 0 0xa0000000 0x0 0xa0000000 0x0 0x20000000>; /* memory */ ++ interrupts-extended = <&intc1_4 31>; ++ resets = <&syscon1 SCU1_RESET_H2X>, ++ <&syscon1 SCU1_RESET_PCIE2RST>; ++ reset-names = "h2x", "perst"; ++ status = "disabled"; ++ ++ clocks = <&syscon1 SCU1_CLK_GATE_PCICLK>; ++ pinctrl-0 = <&pinctrl_pcierc2_perst_default>; ++ pinctrl-names = "default"; ++ ++ #interrupt-cells = <1>; ++ msi-controller; ++ ++ aspeed,pciephy = <&pcie_phy2>; ++ aspeed,pciecfg = <&pcie_cfg2>; ++ ++ interrupt-map-mask = <0 0 0 7>; ++ interrupt-map = <0 0 0 1 &pcie_intc2 0>, ++ <0 0 0 2 &pcie_intc2 1>, ++ <0 0 0 3 &pcie_intc2 2>, ++ <0 0 0 4 &pcie_intc2 3>; ++ pcie_intc2: interrupt-controller { ++ interrupt-controller; ++ #address-cells = <0>; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ uhci1: usb@14110000 { ++ compatible = "aspeed,ast2700-uhci", "generic-uhci"; ++ reg = <0x0 0x14110000 0x0 0x100>; ++ interrupts-extended = <&intc1_4 27>; ++ #ports = <2>; ++ clocks = <&syscon1 SCU1_CLK_GATE_UHCICLK>; ++ resets = <&syscon1 SCU1_RESET_UHCI>; ++ status = "disabled"; ++ }; ++ ++ vhubc: usb-vhub@14120000 { ++ compatible = "aspeed,ast2700-usb-vhubc"; ++ reg = <0x0 0x14120000 0x0 0x820>; ++ interrupts-extended = <&intc1_4 28>; ++ clocks = <&syscon1 SCU1_CLK_GATE_PORTCUSB2CLK>; ++ resets = <&syscon1 SCU1_RESET_PORTC_VHUB_EHCI>; ++ aspeed,vhub-downstream-ports = <7>; ++ aspeed,vhub-generic-endpoints = <21>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_usb2cd_default>; ++ status = "disabled"; ++ }; ++ ++ ehci2: usb@14121000 { ++ compatible = "aspeed,ast2700-ehci", "generic-ehci"; ++ reg = <0x0 0x14121000 0x0 0x100>; ++ interrupts-extended = <&intc1_4 28>; ++ clocks = <&syscon1 SCU1_CLK_GATE_PORTCUSB2CLK>; ++ resets = <&syscon1 SCU1_RESET_PORTC_VHUB_EHCI>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_usb2ch_default>; ++ status = "disabled"; ++ }; ++ ++ vhubd: usb-vhub@14122000 { ++ compatible = "aspeed,ast2700-usb-vhubd"; ++ reg = <0x0 0x14122000 0x0 0x820>; ++ interrupts-extended = <&intc1_4 29>; ++ clocks = <&syscon1 SCU1_CLK_GATE_PORTDUSB2CLK>; ++ resets = <&syscon1 SCU1_RESET_PORTD_VHUB_EHCI>; ++ aspeed,vhub-downstream-ports = <7>; ++ aspeed,vhub-generic-endpoints = <21>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_usb2dd_default>; ++ status = "disabled"; ++ }; ++ ++ ehci3: usb@14123000 { ++ compatible = "aspeed,ast2700-ehci", "generic-ehci"; ++ reg = <0x0 0x14123000 0x0 0x100>; ++ interrupts-extended = <&intc1_4 29>; ++ clocks = <&syscon1 SCU1_CLK_GATE_PORTDUSB2CLK>; ++ resets = <&syscon1 SCU1_RESET_PORTD_VHUB_EHCI>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_usb2dh_default>; ++ status = "disabled"; ++ }; ++ ++ sram1: sram@14b80000 { ++ compatible = "mmio-sram"; ++ reg = <0x0 0x14b80000 0x0 0x40000>; ++ ranges = <0x0 0x0 0x0 0x14b80000 0x0 0x40000>; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ no-memory-wc; ++ ++ exported@0 { ++ reg 
= <0 0x0 0 0x40000>; ++ export; ++ }; ++ }; ++ ++ adc0: adc@14c00000 { ++ compatible = "aspeed,ast2700-adc0"; ++ reg = <0x0 0x14c00000 0 0x100>; ++ clocks = <&syscon1 SCU1_CLK_AHB>; ++ resets = <&syscon1 SCU1_RESET_ADC>; ++ interrupts-extended = <&intc1_2 16>; ++ #io-channel-cells = <1>; ++ aspeed,scu = <&syscon1>; ++ status = "disabled"; ++ }; ++ ++ adc1: adc@14c00100 { ++ compatible = "aspeed,ast2700-adc1"; ++ reg = <0x0 0x14c00100 0x0 0x100>; ++ clocks = <&syscon1 SCU1_CLK_AHB>; ++ resets = <&syscon1 SCU1_RESET_ADC>; ++ interrupts-extended = <&intc1_2 16>; ++ #io-channel-cells = <1>; ++ aspeed,scu = <&syscon1>; ++ status = "disabled"; ++ }; ++ ++ syscon1: syscon@14c02000 { ++ compatible = "aspeed,ast2700-scu1", "syscon", "simple-mfd"; ++ reg = <0x0 0x14c02000 0x0 0x1000>; ++ ranges = <0x0 0x0 0x14c02000 0x1000>; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ #clock-cells = <1>; ++ #reset-cells = <1>; ++ ++ scu_ic2: interrupt-controller@100 { ++ #interrupt-cells = <1>; ++ compatible = "aspeed,ast2700-scu-ic2"; ++ reg = <0x100 0x8>; ++ interrupts-extended = <&intc1_5 0>; ++ interrupt-controller; ++ }; ++ ++ scu_ic3: interrupt-controller@108 { ++ #interrupt-cells = <1>; ++ compatible = "aspeed,ast2700-scu-ic3"; ++ reg = <0x108 0x8>; ++ interrupts-extended = <&intc1_5 26>; ++ interrupt-controller; ++ }; ++ ++ pinctrl1: pinctrl@400 { ++ compatible = "aspeed,ast2700-soc1-pinctrl"; ++ reg = <0x400 0x100>; ++ }; ++ }; ++ ++ rng: hwrng@14c020f0 { ++ compatible = "aspeed,ast2700-trng"; ++ reg = <0x0 0x14c020f0 0x0 0x8>; ++ status = "disabled"; ++ }; ++ ++ pcie_cfg2: syscon@14c02a80 { ++ compatible = "syscon"; ++ reg = <0 0x14c02a80 0 0x80>; ++ }; ++ ++ chassis: chassis@14c04010 { ++ compatible = "aspeed,ast2600-chassis"; ++ reg = <0 0x14c04010 0 0x4>; ++ interrupts-extended = <&intc1_5 5>; ++ status = "disabled"; ++ }; ++ ++ backed_sram0: sram@14c04100 { ++ compatible = "mmio-sram"; ++ reg = <0x0 0x14c04100 0x0 0x80>; ++ ranges = <0x0 0x0 0x0 0x14c04100 0x0 0x80>; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ no-memory-wc; ++ ++ exported@0 { ++ reg = <0 0x0 0 0x80>; ++ export; ++ }; ++ }; ++ ++ backed_sram1: sram@14c04300 { ++ compatible = "mmio-sram"; ++ reg = <0x0 0x14c04300 0x0 0x80>; ++ ranges = <0x0 0x0 0x0 0x14c04300 0x0 0x80>; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ no-memory-wc; ++ ++ exported@0 { ++ reg = <0 0x0 0 0x80>; ++ export; ++ }; ++ }; ++ ++ backed_sram2: sram@14c04500 { ++ compatible = "mmio-sram"; ++ reg = <0x0 0x14c04500 0x0 0x40>; ++ ranges = <0x0 0x0 0x0 0x14c04500 0x0 0x40>; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ no-memory-wc; ++ ++ exported@0 { ++ reg = <0 0x0 0 0x40>; ++ export; ++ }; ++ }; ++ ++ espi0: espi@14c05000 { ++ compatible = "aspeed,ast2700-espi"; ++ reg = <0 0x14c05000 0 0x1000>; ++ interrupts-extended = <&intc1_0 10>, <&intc1_0 16>; ++ clocks = <&syscon1 SCU1_CLK_GATE_ESPI0CLK>; ++ resets = <&syscon1 SCU1_RESET_ESPI0>; ++ syscon = <&syscon1>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_espi0_default>; ++ status = "disabled"; ++ }; ++ ++ espi1: espi@14c06000 { ++ compatible = "aspeed,ast2700-espi"; ++ reg = <0 0x14c06000 0 0x1000>; ++ interrupts-extended = <&intc1_1 10>, <&intc1_1 16>; ++ clocks = <&syscon1 SCU1_CLK_GATE_ESPI1CLK>; ++ resets = <&syscon1 SCU1_RESET_ESPI1>; ++ syscon = <&syscon1>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_espi1_default>; ++ status = "disabled"; ++ }; ++ ++ otp: otp@14c07000 { ++ compatible = "aspeed,ast2700-otp"; ++ reg = <0 0x14c07000 0 0x1000>; ++ interrupts-extended = 
<&intc1_5 8>; ++ aspeed,scu0 = <&syscon0>; ++ aspeed,scu1 = <&syscon1>; ++ status = "disabled"; ++ }; ++ ++ jtag1: jtag@14c09000 { ++ compatible = "aspeed,ast2700-jtag"; ++ reg= <0x0 0x14c09000 0x0 0x40>; ++ interrupts-extended = <&intc1_5 2>; ++ clocks = <&syscon1 SCU1_CLK_AHB>; ++ resets = <&syscon1 SCU1_RESET_JTAG1>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_jtagm1_default>; ++ status = "disabled"; ++ }; ++ ++ gpio1: gpio@14c0b000 { ++ #gpio-cells = <2>; ++ gpio-controller; ++ compatible = "aspeed,ast2700-gpio"; ++ reg = <0x0 0x14c0b000 0x0 0x1000>; ++ interrupts-extended = <&intc1_2 18>; ++ gpio-ranges = <&pinctrl1 0 0 216>; ++ ngpios = <216>; ++ clocks = <&syscon1 SCU1_CLK_AHB>; ++ interrupt-controller; ++ #interrupt-cells = <2>; ++ }; ++ ++ sgpiom0: sgpiom@14c0c000 { ++ #gpio-cells = <2>; ++ gpio-controller; ++ compatible = "aspeed,ast2700-sgpiom"; ++ reg = <0x0 0x14c0c000 0x0 0x100>; ++ interrupts-extended = <&intc1_2 21>; ++ clocks = <&syscon1 SCU1_CLK_APB>; ++ interrupt-controller; ++ #interrupt-cells = <2>; ++ ngpios = <256>; ++ bus-frequency = <12000000>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_sgpm0_default>; ++ status = "disabled"; ++ }; ++ ++ sgpiom1: sgpiom@14c0d000 { ++ #gpio-cells = <2>; ++ gpio-controller; ++ compatible = "aspeed,ast2700-sgpiom"; ++ reg = <0x0 0x14c0d000 0x0 0x100>; ++ interrupts-extended = <&intc1_2 24>; ++ #interrupt-cells = <2>; ++ clocks = <&syscon1 SCU1_CLK_APB>; ++ interrupt-controller; ++ ngpios = <256>; ++ bus-frequency = <12000000>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_sgpm1_default>; ++ status = "disabled"; ++ }; ++ ++ i2c: bus@14c0f000 { ++ compatible = "simple-bus"; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ranges = <0x0 0x0 0x14c0f000 0x1100>; ++ }; ++ ++ udma: uart-dma@14c12000 { ++ compatible = "aspeed,ast2700-udma"; ++ reg = <0x0 0x14c12000 0x0 0x1000>; ++ interrupts-extended = <&intc1_4 19>; ++ }; ++ ++ intc1: interrupt-controller@14c18000 { ++ compatible = "aspeed,ast2700-intc1"; ++ reg = <0 0x14c18000 0 0x400>; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ranges = <0x0 0x0 0x14c18000 0x400>; ++ ++ intc1_0: interrupt-controller@100 { ++ #interrupt-cells = <1>; ++ interrupt-controller; ++ compatible = "aspeed,ast2700-intc-ic"; ++ reg = <0x100 0x10>; ++ interrupts-extended = <&intc0_11 0>; ++ }; ++ ++ intc1_1: interrupt-controller@110 { ++ #interrupt-cells = <1>; ++ interrupt-controller; ++ compatible = "aspeed,ast2700-intc-ic"; ++ reg = <0x110 0x10>; ++ interrupts-extended = <&intc0_11 1>; ++ }; ++ ++ intc1_2: interrupt-controller@120 { ++ #interrupt-cells = <1>; ++ interrupt-controller; ++ compatible = "aspeed,ast2700-intc-ic"; ++ reg = <0x120 0x10>; ++ interrupts-extended = <&intc0_11 2>; ++ }; ++ ++ intc1_3: interrupt-controller@130 { ++ #interrupt-cells = <1>; ++ interrupt-controller; ++ compatible = "aspeed,ast2700-intc-ic"; ++ reg = <0x130 0x10>; ++ interrupts-extended = <&intc0_11 3>; ++ }; ++ ++ intc1_4: interrupt-controller@140 { ++ #interrupt-cells = <1>; ++ interrupt-controller; ++ compatible = "aspeed,ast2700-intc-ic"; ++ reg = <0x140 0x10>; ++ interrupts-extended = <&intc0_11 4>; ++ }; ++ ++ intc1_5: interrupt-controller@150 { ++ #interrupt-cells = <1>; ++ interrupt-controller; ++ compatible = "aspeed,ast2700-intc-ic"; ++ reg = <0x150 0x10>; ++ interrupts-extended = <&intc0_11 5>; ++ }; ++ }; ++ ++ xdma2: xdma@14c19000 { ++ compatible = "aspeed,ast2700-xdma1"; ++ reg = <0x0 0x14c19000 0x0 0x100>; ++ clocks = <&syscon1 SCU1_CLK_GATE_PCICLK>; ++ resets = <&syscon1 
SCU1_RESET_XDMA>; ++ reset-names = "device"; ++ interrupts-extended = <&intc1_4 21>, <&scu_ic2 ASPEED_AST2700_SCU_IC2_PCIE_PERST_LO_TO_HI>; ++ aspeed,pcie-device = "bmc"; ++ aspeed,scu = <&syscon1>; ++ status = "disabled"; ++ }; ++ ++ mctp2: mctp2@14c1a000 { ++ compatible = "aspeed,ast2700-mctp1"; ++ reg = <0x0 0x14c1a000 0x0 0x40>; ++ interrupts-extended = <&intc1_4 3>, <&scu_ic2 ASPEED_AST2700_SCU_IC2_PCIE_PERST_LO_TO_HI>; ++ interrupt-names = "mctp", "pcie"; ++ resets = <&syscon1 SCU1_RESET_MCTP>; ++ aspeed,scu = <&syscon1>; ++ aspeed,pcieh = <&pcie_phy2>; ++ status = "disabled"; ++ }; ++ ++ pcie_phy2: phy@14c1c000 { ++ compatible = "aspeed,ast2700-pcie-phy", "syscon"; ++ reg = <0x0 0x14c1c000 0x0 0x800>; ++ }; ++ ++ e2m_config2: e2m-config@14c1d000 { ++ compatible = "syscon", "simple-mfd"; ++ reg = <0 0x14c1d000 0 0x300>; ++ ranges = <0x0 0x0 0 0x14c1d000 0 0x300>; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ aspeed,device = <&pcie_cfg2>; ++ ++ pinctrl-0 = <&pinctrl_pcierc2_perst_default>; ++ pinctrl-names = "default"; ++ ++ e2m_ic2: interrupt-controller@14 { ++ #interrupt-cells = <1>; ++ compatible = "aspeed,ast2700-e2m-ic"; ++ reg = <0 0x14 0 0x8>; ++ interrupts-extended = <&intc1_4 20>; ++ interrupt-controller; ++ }; ++ ++ pcie2_mmbi0: pcie2-mmbi@0 { ++ compatible = "aspeed,ast2700-pcie-mmbi"; ++ interrupts-extended = <&e2m_ic2 ASPEED_AST2700_E2M_MMBI_H2B_INT0>; ++ index = <0>; ++ pid = <2>; ++ bar = <0x1c>; ++ msi = <1>; ++ status = "disabled"; ++ }; ++ ++ pcie2_mmbi1: pcie2-mmbi@1 { ++ compatible = "aspeed,ast2700-pcie-mmbi"; ++ interrupts-extended = <&e2m_ic2 ASPEED_AST2700_E2M_MMBI_H2B_INT1>; ++ index = <1>; ++ pid = <3>; ++ bar = <0x50>; ++ msi = <2>; ++ status = "disabled"; ++ }; ++ ++ pcie2_mmbi2: pcie2-mmbi@2 { ++ compatible = "aspeed,ast2700-pcie-mmbi"; ++ interrupts-extended = <&e2m_ic2 ASPEED_AST2700_E2M_MMBI_H2B_INT2>; ++ index = <2>; ++ pid = <4>; ++ bar = <0x3c>; ++ msi = <3>; ++ status = "disabled"; ++ }; ++ ++ pcie2_mmbi3: pcie2-mmbi@3 { ++ compatible = "aspeed,ast2700-pcie-mmbi"; ++ interrupts-extended = <&e2m_ic2 ASPEED_AST2700_E2M_MMBI_H2B_INT3>; ++ index = <3>; ++ pid = <5>; ++ bar = <0x4c>; ++ msi = <4>; ++ status = "disabled"; ++ }; ++ ++ pcie2_mmbi4: pcie2-mmbi@4 { ++ compatible = "aspeed,ast2700-pcie-mmbi"; ++ interrupts-extended = <&e2m_ic2 ASPEED_AST2700_E2M_MMBI_H2B_INT4>; ++ index = <4>; ++ pid = <6>; ++ bar = <0x5c>; ++ msi = <5>; ++ status = "disabled"; ++ }; ++ ++ pcie2_mmbi5: pcie2-mmbi@5 { ++ compatible = "aspeed,ast2700-pcie-mmbi"; ++ interrupts-extended = <&e2m_ic2 ASPEED_AST2700_E2M_MMBI_H2B_INT5>; ++ index = <5>; ++ pid = <7>; ++ bar = <0x6c>; ++ msi = <6>; ++ status = "disabled"; ++ }; ++ }; ++ ++ peci0: peci-controller@14c1f000 { ++ compatible = "aspeed,ast2600-peci"; ++ reg = <0x0 0x14c1f000 0x0 0x100>; ++ interrupts-extended = <&intc1_5 4>; ++ clocks = <&syscon1 SCU1_CLK_GATE_IPEREFCLK>; ++ resets = <&syscon1 SCU1_RESET_PECI>; ++ cmd-timeout-ms = <1000>; ++ clock-frequency = <1000000>; ++ status = "disabled"; ++ }; ++ ++ i3c0: i3c0@14c20000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x14c20000 0x0 0x1000>; ++ interrupts-extended = <&intc1_3 0>; ++ clocks = <&syscon1 SCU1_CLK_GATE_I3C0CLK>; ++ resets = <&syscon1 SCU1_RESET_I3C0>, <&syscon1 SCU1_RESET_I3CDMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_hvi3c0_default>; ++ status = "disabled"; ++ }; ++ ++ i3c1: i3c1@14c21000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 
0x14c21000 0x0 0x1000>; ++ interrupts-extended = <&intc1_3 1>; ++ clocks = <&syscon1 SCU1_CLK_GATE_I3C1CLK>; ++ resets = <&syscon1 SCU1_RESET_I3C1>, <&syscon1 SCU1_RESET_I3CDMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_hvi3c1_default>; ++ status = "disabled"; ++ }; ++ ++ i3c2: i3c2@14c22000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x14c22000 0x0 0x1000>; ++ interrupts-extended = <&intc1_3 2>; ++ clocks = <&syscon1 SCU1_CLK_GATE_I3C2CLK>; ++ resets = <&syscon1 SCU1_RESET_I3C2>, <&syscon1 SCU1_RESET_I3CDMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_hvi3c2_default>; ++ status = "disabled"; ++ }; ++ ++ i3c3: i3c3@14c23000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x14c23000 0x0 0x1000>; ++ interrupts-extended = <&intc1_3 3>; ++ clocks = <&syscon1 SCU1_CLK_GATE_I3C3CLK>; ++ resets = <&syscon1 SCU1_RESET_I3C3>, <&syscon1 SCU1_RESET_I3CDMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_hvi3c3_default>; ++ status = "disabled"; ++ }; ++ ++ i3c4: i3c4@14c24000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x14c24000 0x0 0x1000>; ++ interrupts-extended = <&intc1_3 4>; ++ clocks = <&syscon1 SCU1_CLK_GATE_I3C4CLK>; ++ resets = <&syscon1 SCU1_RESET_I3C4>, <&syscon1 SCU1_RESET_I3CDMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_i3c4_default>; ++ status = "disabled"; ++ }; ++ ++ i3c5: i3c5@14c25000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x14c25000 0x0 0x1000>; ++ interrupts-extended = <&intc1_3 5>; ++ clocks = <&syscon1 SCU1_CLK_GATE_I3C5CLK>; ++ resets = <&syscon1 SCU1_RESET_I3C5>, <&syscon1 SCU1_RESET_I3CDMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_i3c5_default>; ++ status = "disabled"; ++ }; ++ ++ i3c6: i3c6@14c26000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x14c26000 0x0 0x1000>; ++ interrupts-extended = <&intc1_3 6>; ++ clocks = <&syscon1 SCU1_CLK_GATE_I3C6CLK>; ++ resets = <&syscon1 SCU1_RESET_I3C6>, <&syscon1 SCU1_RESET_I3CDMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_i3c6_default>; ++ status = "disabled"; ++ }; ++ ++ i3c7: i3c7@14c27000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x14c27000 0x0 0x1000>; ++ interrupts-extended = <&intc1_3 7>; ++ clocks = <&syscon1 SCU1_CLK_GATE_I3C7CLK>; ++ resets = <&syscon1 SCU1_RESET_I3C7>, <&syscon1 SCU1_RESET_I3CDMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_i3c7_default>; ++ status = "disabled"; ++ }; ++ ++ i3c8: i3c8@14c28000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x14c28000 0x0 0x1000>; ++ interrupts-extended = <&intc1_3 8>; ++ clocks = <&syscon1 SCU1_CLK_GATE_I3C8CLK>; ++ resets = <&syscon1 SCU1_RESET_I3C8>, <&syscon1 SCU1_RESET_I3CDMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_i3c8_default>; ++ status = "disabled"; ++ }; ++ ++ i3c9: i3c9@14c29000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x14c29000 0x0 0x1000>; ++ interrupts-extended = <&intc1_3 9>; ++ clocks = <&syscon1 SCU1_CLK_GATE_I3C9CLK>; ++ resets = <&syscon1 SCU1_RESET_I3C9>, <&syscon1 SCU1_RESET_I3CDMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_i3c9_default>; ++ status = "disabled"; ++ }; ++ ++ i3c10: i3c10@14c2a000 { ++ compatible = 
"aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x14c2a000 0x0 0x1000>; ++ interrupts-extended = <&intc1_3 10>; ++ clocks = <&syscon1 SCU1_CLK_GATE_I3C10CLK>; ++ resets = <&syscon1 SCU1_RESET_I3C10>, <&syscon1 SCU1_RESET_I3CDMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_i3c10_default>; ++ status = "disabled"; ++ }; ++ ++ i3c11: i3c11@14c2b000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x14c2b000 0x0 0x1000>; ++ interrupts-extended = <&intc1_3 11>; ++ clocks = <&syscon1 SCU1_CLK_GATE_I3C11CLK>; ++ resets = <&syscon1 SCU1_RESET_I3C11>, <&syscon1 SCU1_RESET_I3CDMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_i3c11_default>; ++ status = "disabled"; ++ }; ++ ++ i3c12: i3c12@14c2c000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x14c2c000 0x0 0x1000>; ++ interrupts-extended = <&intc1_3 12>; ++ clocks = <&syscon1 SCU1_CLK_GATE_I3C12CLK>; ++ resets = <&syscon1 SCU1_RESET_I3C12>, <&syscon1 SCU1_RESET_I3CDMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_hvi3c12_default>; ++ status = "disabled"; ++ }; ++ ++ i3c13: i3c13@14c2d000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x14c2d000 0x0 0x1000>; ++ interrupts-extended = <&intc1_3 13>; ++ clocks = <&syscon1 SCU1_CLK_GATE_I3C13CLK>; ++ resets = <&syscon1 SCU1_RESET_I3C13>, <&syscon1 SCU1_RESET_I3CDMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_hvi3c13_default>; ++ status = "disabled"; ++ }; ++ ++ i3c14: i3c14@14c2e000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x14c2e000 0x0 0x1000>; ++ interrupts-extended = <&intc1_3 14>; ++ clocks = <&syscon1 SCU1_CLK_GATE_I3C14CLK>; ++ resets = <&syscon1 SCU1_RESET_I3C14>, <&syscon1 SCU1_RESET_I3CDMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_hvi3c14_default>; ++ status = "disabled"; ++ }; ++ ++ i3c15: i3c15@14c2f000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x14c2f000 0x0 0x1000>; ++ interrupts-extended = <&intc1_3 15>; ++ clocks = <&syscon1 SCU1_CLK_GATE_I3C15CLK>; ++ resets = <&syscon1 SCU1_RESET_I3C15>, <&syscon1 SCU1_RESET_I3CDMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_hvi3c15_default>; ++ status = "disabled"; ++ }; ++ ++ vuart0: serial@14c30000 { ++ compatible = "aspeed,ast2700-uart"; ++ reg = <0x0 0x14c30000 0x0 0x40>; ++ interrupts-extended = <&intc1_0 17>; ++ clocks = <&syscon0 SCU0_CLK_APB>; ++ virtual; ++ status = "disabled"; ++ }; ++ ++ vuart1: serial@14c30100 { ++ compatible = "aspeed,ast2700-uart"; ++ reg = <0x0 0x14c30100 0x0 0x40>; ++ interrupts-extended = <&intc1_0 18>; ++ clocks = <&syscon0 SCU0_CLK_APB>; ++ virtual; ++ status = "disabled"; ++ }; ++ ++ vuart2: serial@14c30200 { ++ compatible = "aspeed,ast2700-uart"; ++ reg = <0x0 0x14c30200 0x0 0x40>; ++ interrupts-extended = <&intc1_1 17>; ++ clocks = <&syscon0 SCU0_CLK_APB>; ++ virtual; ++ status = "disabled"; ++ }; ++ ++ vuart3: serial@14c30300 { ++ compatible = "aspeed,ast2700-uart"; ++ reg = <0x0 0x14c30300 0x0 0x40>; ++ interrupts-extended = <&intc1_1 18>; ++ clocks = <&syscon0 SCU0_CLK_APB>; ++ virtual; ++ status = "disabled"; ++ }; ++ ++ lpc0: lpc@14c31000 { ++ compatible = "aspeed,ast2700-lpc", "simple-mfd", "syscon"; ++ reg = <0x0 0x14c31000 0x0 0x1000>; ++ reg-io-width = <4>; ++ ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ranges = <0x0 0x0 0x14c31000 0x1000>; 
++ ++ lpc0_kcs0: lpc-kcs@24 { ++ compatible = "aspeed,ast2600-kcs-bmc"; ++ reg = <0x24 0x1>, <0x30 0x1>, <0x3c 0x1>; ++ interrupts-extended = <&intc1_0 4>; ++ status = "disabled"; ++ }; ++ ++ lpc0_kcs1: lpc-kcs@28 { ++ compatible = "aspeed,ast2600-kcs-bmc"; ++ reg = <0x28 0x1>, <0x34 0x1>, <0x40 0x1>; ++ interrupts-extended = <&intc1_0 5>; ++ status = "disabled"; ++ }; ++ ++ lpc0_kcs2: lpc-kcs@2c { ++ compatible = "aspeed,ast2600-kcs-bmc"; ++ reg = <0x2c 0x1>, <0x38 0x1>, <0x44 0x1>; ++ interrupts-extended = <&intc1_0 6>; ++ status = "disabled"; ++ }; ++ ++ lpc0_kcs3: lpc-kcs@114 { ++ compatible = "aspeed,ast2600-kcs-bmc"; ++ reg = <0x114 0x1>, <0x118 0x1>, <0x11c 0x1>; ++ interrupts-extended = <&intc1_0 7>; ++ status = "disabled"; ++ }; ++ ++ lpc0_snoop: lpc-snoop@80 { ++ compatible = "aspeed,ast2600-lpc-snoop"; ++ reg = <0x80 0x80>; ++ interrupts-extended = <&intc1_0 1>; ++ status = "disabled"; ++ }; ++ ++ lpc0_pcc: lpc-pcc@0 { ++ compatible = "aspeed,ast2600-lpc-pcc"; ++ reg = <0x0 0x140>; ++ interrupts-extended = <&intc1_0 3>; ++ status = "disabled"; ++ }; ++ ++ lpc0_reset: reset-controller@98 { ++ compatible = "aspeed,ast2600-lpc-reset"; ++ reg = <0x98 0x4>; ++ #reset-cells = <1>; ++ }; ++ ++ lpc0_uart_routing: uart-routing@98 { ++ compatible = "aspeed,ast2700n0-uart-routing"; ++ reg = <0x98 0x8>; ++ status = "disabled"; ++ }; ++ ++ lpc0_ibt: ibt@140 { ++ compatible = "aspeed,ast2600-ibt-bmc"; ++ reg = <0x140 0x18>; ++ interrupts-extended = <&intc1_0 2>; ++ status = "disabled"; ++ }; ++ ++ lpc0_mbox: mbox@200 { ++ compatible = "aspeed,ast2700-mbox"; ++ reg = <0x200 0xc0>; ++ interrupts-extended = <&intc1_0 20>; ++ status = "disabled"; ++ }; ++ }; ++ ++ lpc1: lpc@14c32000 { ++ compatible = "aspeed,ast2700-lpc", "simple-mfd", "syscon"; ++ reg = <0x0 0x14c32000 0x0 0x1000>; ++ reg-io-width = <4>; ++ ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ranges = <0x0 0x0 0x14c32000 0x1000>; ++ ++ lpc1_kcs0: kcs@24 { ++ compatible = "aspeed,ast2600-kcs-bmc"; ++ reg = <0x24 0x1>, <0x30 0x1>, <0x3c 0x1>; ++ interrupts-extended = <&intc1_1 4>; ++ status = "disabled"; ++ }; ++ ++ lpc1_kcs1: kcs@28 { ++ compatible = "aspeed,ast2600-kcs-bmc"; ++ reg = <0x28 0x1>, <0x34 0x1>, <0x40 0x1>; ++ interrupts-extended = <&intc1_1 5>; ++ status = "disabled"; ++ }; ++ ++ lpc1_kcs2: kcs@2c { ++ compatible = "aspeed,ast2600-kcs-bmc"; ++ reg = <0x2c 0x1>, <0x38 0x1>, <0x44 0x1>; ++ interrupts-extended = <&intc1_1 6>; ++ status = "disabled"; ++ }; ++ ++ lpc1_kcs3: kcs@114 { ++ compatible = "aspeed,ast2600-kcs-bmc"; ++ reg = <0x114 0x1>, <0x118 0x1>, <0x11c 0x1>; ++ interrupts-extended = <&intc1_1 7>; ++ status = "disabled"; ++ }; ++ ++ lpc1_snoop: lpc-snoop@80 { ++ compatible = "aspeed,ast2600-lpc-snoop"; ++ reg = <0x80 0x80>; ++ interrupts-extended = <&intc1_1 1>; ++ status = "disabled"; ++ }; ++ ++ lpc1_pcc: lpc-pcc@0 { ++ compatible = "aspeed,ast2600-lpc-pcc"; ++ reg = <0x0 0x140>; ++ interrupts-extended = <&intc1_1 3>; ++ status = "disabled"; ++ }; ++ ++ lpc1_reset: reset-controller@98 { ++ compatible = "aspeed,ast2600-lpc-reset"; ++ reg = <0x98 0x4>; ++ #reset-cells = <1>; ++ }; ++ ++ lpc1_uart_routing: uart-routing@98 { ++ compatible = "aspeed,ast2700n1-uart-routing"; ++ reg = <0x98 0x8>; ++ status = "disabled"; ++ }; ++ ++ lpc1_ibt: ibt@140 { ++ compatible = "aspeed,ast2600-ibt-bmc"; ++ reg = <0x140 0x18>; ++ interrupts-extended = <&intc1_1 2>; ++ status = "disabled"; ++ }; ++ ++ lpc1_mbox: mbox@200 { ++ compatible = "aspeed,ast2700-mbox"; ++ reg = <0x200 0xc0>; ++ interrupts-extended = <&intc1_1 20>; ++ 
status = "disabled"; ++ }; ++ }; ++ ++ uart0: serial@14c33000 { ++ compatible = "aspeed,ast2700-uart"; ++ reg = <0x0 0x14c33000 0x0 0x100>; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ clocks = <&syscon1 SCU1_CLK_GATE_UART0CLK>; ++ resets = <&lpc0_reset 4>; ++ interrupts-extended = <&intc1_4 7>; ++ no-loopback-test; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_txd0_default &pinctrl_rxd0_default>; ++ status = "disabled"; ++ }; ++ ++ uart1: serial@14c33100 { ++ compatible = "aspeed,ast2700-uart"; ++ reg = <0x0 0x14c33100 0x0 0x100>; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ clocks = <&syscon1 SCU1_CLK_GATE_UART1CLK>; ++ resets = <&lpc0_reset 5>; ++ interrupts-extended = <&intc1_4 8>; ++ no-loopback-test; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_txd1_default &pinctrl_rxd1_default>; ++ status = "disabled"; ++ }; ++ ++ uart2: serial@14c33200 { ++ compatible = "aspeed,ast2700-uart"; ++ reg = <0x0 0x14c33200 0x0 0x100>; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ clocks = <&syscon1 SCU1_CLK_GATE_UART2CLK>; ++ resets = <&lpc0_reset 6>; ++ interrupts-extended = <&intc1_4 9>; ++ no-loopback-test; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_txd2_default &pinctrl_rxd2_default>; ++ status = "disabled"; ++ }; ++ ++ uart3: serial@14c33300 { ++ compatible = "aspeed,ast2700-uart"; ++ reg = <0x0 0x14c33300 0x0 0x100>; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ clocks = <&syscon1 SCU1_CLK_GATE_UART3CLK>; ++ resets = <&lpc0_reset 7>; ++ interrupts-extended = <&intc1_4 10>; ++ no-loopback-test; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_txd3_default &pinctrl_rxd3_default>; ++ status = "disabled"; ++ }; ++ ++ uart5: serial@14c33400 { ++ compatible = "aspeed,ast2700-uart"; ++ reg = <0x0 0x14c33400 0x0 0x100>; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ clocks = <&syscon1 SCU1_CLK_GATE_UART5CLK>; ++ resets = <&lpc1_reset 4>; ++ interrupts-extended = <&intc1_4 11>; ++ no-loopback-test; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_txd5_default &pinctrl_rxd5_default>; ++ status = "disabled"; ++ }; ++ ++ uart6: serial@14c33500 { ++ compatible = "aspeed,ast2700-uart"; ++ reg = <0x0 0x14c33500 0x0 0x100>; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ clocks = <&syscon1 SCU1_CLK_GATE_UART6CLK>; ++ resets = <&lpc1_reset 5>; ++ interrupts-extended = <&intc1_4 12>; ++ no-loopback-test; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_txd6_default &pinctrl_rxd6_default>; ++ status = "disabled"; ++ }; ++ ++ uart7: serial@14c33600 { ++ compatible = "aspeed,ast2700-uart"; ++ reg = <0x0 0x14c33600 0x0 0x100>; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ clocks = <&syscon1 SCU1_CLK_GATE_UART7CLK>; ++ resets = <&lpc1_reset 6>; ++ interrupts-extended = <&intc1_4 13>; ++ no-loopback-test; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_txd7_default &pinctrl_rxd7_default>; ++ status = "disabled"; ++ }; ++ ++ uart8: serial@14c33700 { ++ compatible = "aspeed,ast2700-uart"; ++ reg = <0x0 0x14c33700 0x0 0x100>; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ clocks = <&syscon1 SCU1_CLK_GATE_UART8CLK>; ++ resets = <&lpc1_reset 7>; ++ interrupts-extended = <&intc1_4 14>; ++ no-loopback-test; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_txd8_default &pinctrl_rxd8_default>; ++ status = "disabled"; ++ }; ++ ++ uart9: serial@14c33800 { ++ compatible = "aspeed,ast2700-uart"; ++ reg = <0x0 0x14c33800 0x0 0x100>; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ clocks = <&syscon1 SCU1_CLK_GATE_UART9CLK>; ++ interrupts-extended = <&intc1_4 15>; ++ 
no-loopback-test; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_txd9_default &pinctrl_rxd9_default>; ++ status = "disabled"; ++ }; ++ ++ uart10: serial@14c33900 { ++ compatible = "aspeed,ast2700-uart"; ++ reg = <0x0 0x14c33900 0x0 0x100>; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ clocks = <&syscon1 SCU1_CLK_GATE_UART10CLK>; ++ interrupts-extended = <&intc1_4 16>; ++ no-loopback-test; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_txd10_default &pinctrl_rxd10_default>; ++ status = "disabled"; ++ }; ++ ++ uart11: serial@14c33a00 { ++ compatible = "aspeed,ast2700-uart"; ++ reg = <0x0 0x14c33a00 0x0 0x100>; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ clocks = <&syscon1 SCU1_CLK_GATE_UART11CLK>; ++ interrupts-extended = <&intc1_4 17>; ++ no-loopback-test; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_txd11_default &pinctrl_rxd11_default>; ++ status = "disabled"; ++ }; ++ ++ uart12: serial@14c33b00 { ++ compatible = "aspeed,ast2700-uart"; ++ reg = <0x0 0x14c33b00 0x0 0x100>; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ clocks = <&syscon1 SCU1_CLK_GATE_UART12CLK>; ++ interrupts-extended = <&intc1_4 18>; ++ no-loopback-test; ++ pinctrl-names = "default"; ++ status = "disabled"; ++ }; ++ ++ uart13: serial@14c33c00 { ++ compatible = "aspeed,ast2700-uart"; ++ reg = <0x0 0x14c33c00 0x0 0x100>; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ clocks = <&syscon1 SCU1_CLK_UART13>; ++ interrupts-extended = <&intc1_0 23>; ++ no-loopback-test; ++ pinctrl-names = "default"; ++ status = "disabled"; ++ }; ++ ++ uart14: serial@14c33d00 { ++ compatible = "aspeed,ast2700-uart"; ++ reg = <0x0 0x14c33d00 0x0 0x100>; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ clocks = <&syscon1 SCU1_CLK_UART14>; ++ interrupts-extended = <&intc1_1 23>; ++ no-loopback-test; ++ pinctrl-names = "default"; ++ status = "disabled"; ++ }; ++ ++ ltpi0: ltpi@14c34000 { ++ compatible = "aspeed-ltpi"; ++ reg = <0x0 0x14c34000 0x0 0x100>; ++ clocks = <&syscon1 SCU1_CLK_GATE_LTPICLK>, ++ <&syscon1 SCU1_CLK_GATE_LTPIPHYCLK>; ++ clock-names = "ahb", "phy"; ++ resets = <&syscon1 SCU1_RESET_LTPI0>; ++ interrupts-extended = <&intc1_5 12>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_gpio: ltpi0-gpio@14c34c00 { ++ #gpio-cells = <2>; ++ gpio-controller; ++ compatible = "aspeed,ast2700-ltpi-gpio"; ++ reg = <0x0 0x14c34c00 0x0 0x300>; ++ interrupts-extended = <&intc1_0 22>; ++ ngpios = <112>; ++ interrupt-controller; ++ #interrupt-cells = <2>; ++ status = "disabled"; ++ }; ++ ++ ltpi1: ltpi@14c35000 { ++ compatible = "aspeed-ltpi"; ++ reg = <0x0 0x14c35000 0x0 0x100>; ++ clocks = <&syscon1 SCU1_CLK_GATE_LTPICLK>, ++ <&syscon1 SCU1_CLK_GATE_LTPI1TXCLK>; ++ clock-names = "ahb", "phy"; ++ resets = <&syscon1 SCU1_RESET_LTPI1>; ++ interrupts-extended = <&intc1_5 13>; ++ status = "disabled"; ++ }; ++ ++ ltpi1_gpio: ltpi1-gpio@14c35c00 { ++ #gpio-cells = <2>; ++ gpio-controller; ++ compatible = "aspeed,ast2700-ltpi-gpio"; ++ reg = <0x0 0x14c35c00 0x0 0x300>; ++ interrupts-extended = <&intc1_1 22>; ++ ngpios = <112>; ++ interrupt-controller; ++ #interrupt-cells = <2>; ++ status = "disabled"; ++ }; ++ ++ wdt0: watchdog@14c37000 { ++ compatible = "aspeed,ast2700-wdt"; ++ reg = <0x0 0x14c37000 0x0 0x80>; ++ }; ++ ++ wdt1: watchdog@14c37080 { ++ compatible = "aspeed,ast2700-wdt"; ++ reg = <0x0 0x14c37080 0x0 0x80>; ++ }; ++ ++ wdt2: watchdog@14c37100 { ++ compatible = "aspeed,ast2700-wdt"; ++ reg = <0x0 0x14c37100 0x0 0x80>; ++ status = "disabled"; ++ }; ++ ++ wdt3: watchdog@14c37180 { ++ compatible = "aspeed,ast2700-wdt"; ++ reg = <0x0 
0x14c37180 0x0 0x80>; ++ status = "disabled"; ++ }; ++ ++ mbox2: mbox@14c39200 { ++ compatible = "aspeed,ast2700-mailbox"; ++ reg = <0x0 0x14c39200 0x0 0x100>, <0x0 0x14c39300 0x0 0x100>; ++ reg-names = "tx", "rx"; ++ interrupts-extended = <&intc1_5 17>; ++ #mbox-cells = <1>; ++ mbox-name = "bootmcu"; ++ }; ++ ++ sgpios: sgpios@14c3c000 { ++ #gpio-cells = <2>; ++ gpio-controller; ++ compatible = "aspeed,ast2700-sgpios"; ++ reg = <0x0 0x14c3c000 0x0 0x100>; ++ interrupts-extended = <&intc1_2 29>; ++ clocks = <&syscon1 SCU1_CLK_APB>; ++ interrupt-controller; ++ #interrupt-cells = <2>; ++ ngpios = <72>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_sgps_default>; ++ status = "disabled"; ++ }; ++ ++ fsim0: fsi@21800000 { ++ compatible = "aspeed,ast2700-fsi-master", "fsi-master"; ++ reg = <0x0 0x21800000 0x0 0x94>; ++ interrupts-extended = <&intc1_5 6>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_fsi0_default>; ++ clocks = <&syscon1 SCU1_CLK_GATE_FSICLK>; ++ resets = <&syscon1 SCU1_RESET_FSI>; ++ status = "disabled"; ++ }; ++ ++ fsim1: fsi@23800000 { ++ compatible = "aspeed,ast2700-fsi-master", "fsi-master"; ++ reg = <0x0 0x23800000 0x0 0x94>; ++ interrupts-extended = <&intc1_5 7>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_fsi1_default>; ++ clocks = <&syscon1 SCU1_CLK_GATE_FSICLK>; ++ resets = <&syscon1 SCU1_RESET_FSI>; ++ status = "disabled"; ++ }; ++ ++ rtc_over_espi0: rtc-over_espi@14c3d000 { ++ compatible = "aspeed,ast2700-rtc-over-espi"; ++ reg = <0x0 0x14c3d000 0x0 0x100>; ++ interval-ms = <1000>; ++ status = "disabled"; ++ }; ++ ++ rtc_over_espi1: rtc-over_espi@14c3d100 { ++ compatible = "aspeed,ast2700-rtc-over-espi"; ++ reg = <0x0 0x14c3d100 0x0 0x100>; ++ interval-ms = <1000>; ++ status = "disabled"; ++ }; ++ ++ }; ++}; ++ ++#include "aspeed-g7-pinctrl.dtsi" ++ ++&i2c { ++ i2c_global: i2c-global-regs@0 { ++ compatible = "aspeed,i2c-global", "simple-mfd", "syscon"; ++ reg = <0x0 0x100>; ++ }; ++ ++ i2c0: i2c-bus@100 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x100 0x80>, <0x1A0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <&i2c_global>; ++ aspeed,enable-dma; ++ clocks = <&syscon1 SCU1_CLK_APB>; ++ resets = <&syscon1 SCU1_RESET_I2C>; ++ interrupts-extended = <&intc1_2 0>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_i2c0_default>; ++ status = "disabled"; ++ }; ++ ++ i2c1: i2c-bus@200 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x200 0x80>, <0x2A0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <&i2c_global>; ++ aspeed,enable-dma; ++ clocks = <&syscon1 SCU1_CLK_APB>; ++ resets = <&syscon1 SCU1_RESET_I2C>; ++ interrupts-extended = <&intc1_2 1>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_i2c1_default>; ++ status = "disabled"; ++ }; ++ ++ i2c2: i2c-bus@300 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x300 0x80>, <0x3A0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <&i2c_global>; ++ aspeed,enable-dma; ++ clocks = <&syscon1 SCU1_CLK_APB>; ++ resets = <&syscon1 SCU1_RESET_I2C>; ++ interrupts-extended = <&intc1_2 2>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_i2c2_default>; ++ status = "disabled"; ++ }; ++ ++ i2c3: i2c-bus@400 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x400 0x80>, <0x4A0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ 
aspeed,global-regs = <&i2c_global>; ++ clocks = <&syscon1 SCU1_CLK_APB>; ++ resets = <&syscon1 SCU1_RESET_I2C>; ++ interrupts-extended = <&intc1_2 3>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_i2c3_default>; ++ status = "disabled"; ++ }; ++ ++ i2c4: i2c-bus@500 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x500 0x80>, <0x5A0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <&i2c_global>; ++ aspeed,enable-dma; ++ clocks = <&syscon1 SCU1_CLK_APB>; ++ resets = <&syscon1 SCU1_RESET_I2C>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ interrupts-extended = <&intc1_2 4>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_i2c4_default>; ++ status = "disabled"; ++ }; ++ ++ i2c5: i2c-bus@600 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x600 0x80>, <0x6A0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <&i2c_global>; ++ aspeed,enable-dma; ++ clocks = <&syscon1 SCU1_CLK_APB>; ++ resets = <&syscon1 SCU1_RESET_I2C>; ++ interrupts-extended = <&intc1_2 5>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_i2c5_default>; ++ status = "disabled"; ++ }; ++ ++ i2c6: i2c-bus@700 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x700 0x80>, <0x7A0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <&i2c_global>; ++ aspeed,enable-dma; ++ clocks = <&syscon1 SCU1_CLK_APB>; ++ resets = <&syscon1 SCU1_RESET_I2C>; ++ interrupts-extended = <&intc1_2 6>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_i2c6_default>; ++ status = "disabled"; ++ }; ++ ++ i2c7: i2c-bus@800 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x800 0x80>, <0x8A0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <&i2c_global>; ++ aspeed,enable-dma; ++ clocks = <&syscon1 SCU1_CLK_APB>; ++ resets = <&syscon1 SCU1_RESET_I2C>; ++ interrupts-extended = <&intc1_2 7>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_i2c7_default>; ++ status = "disabled"; ++ }; ++ ++ i2c8: i2c-bus@900 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x900 0x80>, <0x9A0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <&i2c_global>; ++ aspeed,enable-dma; ++ clocks = <&syscon1 SCU1_CLK_APB>; ++ resets = <&syscon1 SCU1_RESET_I2C>; ++ interrupts-extended = <&intc1_2 8>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_i2c8_default>; ++ status = "disabled"; ++ }; ++ ++ i2c9: i2c-bus@a00 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0xA00 0x80>, <0xAA0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <&i2c_global>; ++ aspeed,enable-dma; ++ clocks = <&syscon1 SCU1_CLK_APB>; ++ resets = <&syscon1 SCU1_RESET_I2C>; ++ interrupts-extended = <&intc1_2 9>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_i2c9_default>; ++ status = "disabled"; ++ }; ++ ++ i2c10: i2c-bus@b00 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0xB00 0x80>, <0xBA0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <&i2c_global>; ++ aspeed,enable-dma; ++ clocks = <&syscon1 SCU1_CLK_APB>; ++ resets = <&syscon1 SCU1_RESET_I2C>; ++ interrupts-extended = <&intc1_2 10>; ++ clock-frequency = <100000>; ++ 
debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_i2c10_default>; ++ status = "disabled"; ++ }; ++ ++ i2c11: i2c-bus@c00 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0xC00 0x80>, <0xCA0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <&i2c_global>; ++ aspeed,enable-dma; ++ clocks = <&syscon1 SCU1_CLK_APB>; ++ resets = <&syscon1 SCU1_RESET_I2C>; ++ interrupts-extended = <&intc1_2 11>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_i2c11_default>; ++ status = "disabled"; ++ }; ++ ++ i2c12: i2c-bus@d00 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0xD00 0x80>, <0xDA0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <&i2c_global>; ++ aspeed,enable-dma; ++ clocks = <&syscon1 SCU1_CLK_APB>; ++ resets = <&syscon1 SCU1_RESET_I2C>; ++ interrupts-extended = <&intc1_2 12>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_i2c12_default>; ++ status = "disabled"; ++ }; ++ ++ i2c13: i2c-bus@e00 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0xE00 0x80>, <0xEA0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <&i2c_global>; ++ aspeed,enable-dma; ++ clocks = <&syscon1 SCU1_CLK_APB>; ++ resets = <&syscon1 SCU1_RESET_I2C>; ++ interrupts-extended = <&intc1_2 13>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_i2c13_default>; ++ status = "disabled"; ++ }; ++ ++ i2c14: i2c-bus@f00 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0xF00 0x80>, <0xFA0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <&i2c_global>; ++ aspeed,enable-dma; ++ clocks = <&syscon1 SCU1_CLK_APB>; ++ resets = <&syscon1 SCU1_RESET_I2C>; ++ interrupts-extended = <&intc1_2 14>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_i2c14_default>; ++ status = "disabled"; ++ }; ++ ++ i2c15: i2c-bus@1000 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x1000 0x80>, <0x10A0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <&i2c_global>; ++ aspeed,enable-dma; ++ clocks = <&syscon1 SCU1_CLK_APB>; ++ resets = <&syscon1 SCU1_RESET_I2C>; ++ interrupts-extended = <&intc1_2 15>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_i2c15_default>; ++ status = "disabled"; ++ }; ++}; +diff --git a/arch/arm64/boot/dts/aspeed/aspeed-ltpi0-pinctrl.dtsi b/arch/arm64/boot/dts/aspeed/aspeed-ltpi0-pinctrl.dtsi +--- a/arch/arm64/boot/dts/aspeed/aspeed-ltpi0-pinctrl.dtsi 1970-01-01 00:00:00.000000000 +0000 ++++ b/arch/arm64/boot/dts/aspeed/aspeed-ltpi0-pinctrl.dtsi 2025-12-23 10:16:06.862271766 +0000 +@@ -0,0 +1,409 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++ ++&ltpi0_pinctrl { ++ pinctrl_ltpi0_i2c0_default: ltpi0-i2c0-default { ++ function = "I2C0"; ++ groups = "I2C0"; ++ }; ++ ++ pinctrl_ltpi0_i2c1_default: ltpi0-i2c1-default { ++ function = "I2C1"; ++ groups = "I2C1"; ++ }; ++ ++ pinctrl_ltpi0_i2c2_default: ltpi0-i2c2-default { ++ function = "I2C2"; ++ groups = "I2C2"; ++ }; ++ ++ pinctrl_ltpi0_i2c3_default: ltpi0-i2c3-default { ++ function = "I2C3"; ++ groups = "I2C3"; ++ }; ++ ++ pinctrl_ltpi0_i2c4_default: ltpi0-i2c4-default { ++ function = "I2C4"; ++ groups = "I2C4"; ++ }; ++ ++ pinctrl_ltpi0_i2c5_default: ltpi0-i2c5-default { ++ function = "I2C5"; ++ groups = "I2C5";
++ }; ++ ++ pinctrl_ltpi0_i2c6_default: ltpi0-i2c6-default { ++ function = "I2C6"; ++ groups = "I2C6"; ++ }; ++ ++ pinctrl_ltpi0_i2c7_default: ltpi0-i2c7-default { ++ function = "I2C7"; ++ groups = "I2C7"; ++ }; ++ ++ pinctrl_ltpi0_i2c8_default: ltpi0-i2c8-default { ++ function = "I2C8"; ++ groups = "I2C8"; ++ }; ++ ++ pinctrl_ltpi0_i2c9_default: ltpi0-i2c9-default { ++ function = "I2C9"; ++ groups = "I2C9"; ++ }; ++ ++ pinctrl_ltpi0_i2c10_default: ltpi0-i2c10-default { ++ function = "I2C10"; ++ groups = "I2C10"; ++ }; ++ ++ pinctrl_ltpi0_i2c11_default: ltpi0-i2c11-default { ++ function = "I2C11"; ++ groups = "I2C11"; ++ }; ++ ++ pinctrl_ltpi0_i2c12_default: ltpi0-i2c12-default { ++ function = "I2C12"; ++ groups = "I2C12"; ++ }; ++ ++ pinctrl_ltpi0_i2c13_default: ltpi0-i2c13-default { ++ function = "I2C13"; ++ groups = "I2C13"; ++ }; ++ ++ pinctrl_ltpi0_i2c14_default: ltpi0-i2c14-default { ++ function = "I2C14"; ++ groups = "I2C14"; ++ }; ++ ++ pinctrl_ltpi0_i2c15_default: ltpi0-i2c15-default { ++ function = "I2C15"; ++ groups = "I2C15"; ++ }; ++ ++ pinctrl_ltpi0_pwm0_default: ltpi0-pwm0-default { ++ function = "PWM0"; ++ groups = "PWM0"; ++ }; ++ ++ pinctrl_ltpi0_pwm1_default: ltpi0-pwm1-default { ++ function = "PWM1"; ++ groups = "PWM1"; ++ }; ++ ++ pinctrl_ltpi0_pwm2_default: ltpi0-pwm2-default { ++ function = "PWM2"; ++ groups = "PWM2"; ++ }; ++ ++ pinctrl_ltpi0_pwm3_default: ltpi0-pwm3-default { ++ function = "PWM3"; ++ groups = "PWM3"; ++ }; ++ ++ pinctrl_ltpi0_pwm4_default: ltpi0-pwm4-default { ++ function = "PWM4"; ++ groups = "PWM4"; ++ }; ++ ++ pinctrl_ltpi0_pwm5_default: ltpi0-pwm5-default { ++ function = "PWM5"; ++ groups = "PWM5"; ++ }; ++ ++ pinctrl_ltpi0_pwm6_default: ltpi0-pwm6-default { ++ function = "PWM6"; ++ groups = "PWM6"; ++ }; ++ ++ pinctrl_ltpi0_pwm7_default: ltpi0-pwm7-default { ++ function = "PWM7"; ++ groups = "PWM7"; ++ }; ++ ++ pinctrl_ltpi0_pwm8_default: ltpi0-pwm8-default { ++ function = "PWM8"; ++ groups = "PWM8"; ++ }; ++ ++ pinctrl_ltpi0_pwm9_default: ltpi0-pwm9-default { ++ function = "PWM9"; ++ groups = "PWM9"; ++ }; ++ ++ pinctrl_ltpi0_pwm10_default: ltpi0-pwm10-default { ++ function = "PWM10"; ++ groups = "PWM10"; ++ }; ++ ++ pinctrl_ltpi0_pwm11_default: ltpi0-pwm11-default { ++ function = "PWM11"; ++ groups = "PWM11"; ++ }; ++ ++ pinctrl_ltpi0_pwm12_default: ltpi0-pwm12-default { ++ function = "PWM12"; ++ groups = "PWM12"; ++ }; ++ ++ pinctrl_ltpi0_pwm13_default: ltpi0-pwm13-default { ++ function = "PWM13"; ++ groups = "PWM13"; ++ }; ++ ++ pinctrl_ltpi0_pwm14_default: ltpi0-pwm14-default { ++ function = "PWM14"; ++ groups = "PWM14"; ++ }; ++ ++ pinctrl_ltpi0_pwm15_default: ltpi0-pwm15-default { ++ function = "PWM15"; ++ groups = "PWM15"; ++ }; ++ ++ pinctrl_ltpi0_tach0_default: ltpi0-tach0-default { ++ function = "TACH0"; ++ groups = "TACH0"; ++ }; ++ ++ pinctrl_ltpi0_tach1_default: ltpi0-tach1-default { ++ function = "TACH1"; ++ groups = "TACH1"; ++ }; ++ ++ pinctrl_ltpi0_tach2_default: ltpi0-tach2-default { ++ function = "TACH2"; ++ groups = "TACH2"; ++ }; ++ ++ pinctrl_ltpi0_tach3_default: ltpi0-tach3-default { ++ function = "TACH3"; ++ groups = "TACH3"; ++ }; ++ ++ pinctrl_ltpi0_tach4_default: ltpi0-tach4-default { ++ function = "TACH4"; ++ groups = "TACH4"; ++ }; ++ ++ pinctrl_ltpi0_tach5_default: ltpi0-tach5-default { ++ function = "TACH5"; ++ groups = "TACH5"; ++ }; ++ ++ pinctrl_ltpi0_tach6_default: ltpi0-tach6-default { ++ function = "TACH6"; ++ groups = "TACH6"; ++ }; ++ ++ pinctrl_ltpi0_tach7_default: ltpi0-tach7-default { ++ function = 
"TACH7"; ++ groups = "TACH7"; ++ }; ++ ++ pinctrl_ltpi0_tach8_default: ltpi0-tach8-default { ++ function = "TACH8"; ++ groups = "TACH8"; ++ }; ++ ++ pinctrl_ltpi0_tach9_default: ltpi0-tach9-default { ++ function = "TACH9"; ++ groups = "TACH9"; ++ }; ++ ++ pinctrl_ltpi0_tach10_default: ltpi0-tach10-default { ++ function = "TACH10"; ++ groups = "TACH10"; ++ }; ++ ++ pinctrl_ltpi0_tach11_default: ltpi0-tach11-default { ++ function = "TACH11"; ++ groups = "TACH11"; ++ }; ++ ++ pinctrl_ltpi0_tach12_default: ltpi0-tach12-default { ++ function = "TACH12"; ++ groups = "TACH12"; ++ }; ++ ++ pinctrl_ltpi0_tach13_default: ltpi0-tach13-default { ++ function = "TACH13"; ++ groups = "TACH13"; ++ }; ++ ++ pinctrl_ltpi0_tach14_default: ltpi0-tach14-default { ++ function = "TACH14"; ++ groups = "TACH14"; ++ }; ++ ++ pinctrl_ltpi0_tach15_default: ltpi0-tach15-default { ++ function = "TACH15"; ++ groups = "TACH15"; ++ }; ++ ++ pinctrl_ltpi0_jtagm_default: ltpi0-jtagm-default { ++ function = "JTAGM"; ++ groups = "JTAGM"; ++ }; ++ ++ pinctrl_ltpi0_hvi3c0_default: ltpi0-hvi3c0-default { ++ function = "I3C0"; ++ groups = "HVI3C0"; ++ }; ++ ++ pinctrl_ltpi0_hvi3c1_default: ltpi0-hvi3c1-default { ++ function = "I3C1"; ++ groups = "HVI3C1"; ++ }; ++ ++ pinctrl_ltpi0_hvi3c2_default: ltpi0-hvi3c2-default { ++ function = "I3C2"; ++ groups = "HVI3C2"; ++ }; ++ ++ pinctrl_ltpi0_hvi3c3_default: ltpi0-hvi3c3-default { ++ function = "I3C3"; ++ groups = "HVI3C3"; ++ }; ++ ++ pinctrl_ltpi0_i3c4_default: ltpi0-i3c4-default { ++ function = "I3C4"; ++ groups = "I3C4"; ++ }; ++ ++ pinctrl_ltpi0_i3c5_default: ltpi0-i3c5-default { ++ function = "I3C5"; ++ groups = "I3C5"; ++ }; ++ ++ pinctrl_ltpi0_i3c6_default: ltpi0-i3c6-default { ++ function = "I3C6"; ++ groups = "I3C6"; ++ }; ++ ++ pinctrl_ltpi0_i3c7_default: ltpi0-i3c7-default { ++ function = "I3C7"; ++ groups = "I3C7"; ++ }; ++ ++ pinctrl_ltpi0_i3c8_default: ltpi0-i3c8-default { ++ function = "I3C8"; ++ groups = "I3C8"; ++ }; ++ ++ pinctrl_ltpi0_i3c9_default: ltpi0-i3c9-default { ++ function = "I3C9"; ++ groups = "I3C9"; ++ }; ++ ++ pinctrl_ltpi0_i3c10_default: ltpi0-i3c10-default { ++ function = "I3C10"; ++ groups = "I3C10"; ++ }; ++ ++ pinctrl_ltpi0_i3c11_default: ltpi0-i3c11-default { ++ function = "I3C11"; ++ groups = "I3C11"; ++ }; ++ ++ pinctrl_ltpi0_hvi3c12_default: ltpi0-hvi3c12-default { ++ function = "I3C12"; ++ groups = "HVI3C12"; ++ }; ++ ++ pinctrl_ltpi0_hvi3c13_default: ltpi0-hvi3c13-default { ++ function = "I3C13"; ++ groups = "HVI3C13"; ++ }; ++ ++ pinctrl_ltpi0_hvi3c14_default: ltpi0-hvi3c14-default { ++ function = "I3C14"; ++ groups = "HVI3C14"; ++ }; ++ ++ pinctrl_ltpi0_hvi3c15_default: ltpi0-hvi3c15-default { ++ function = "I3C15"; ++ groups = "HVI3C15"; ++ }; ++ ++ pinctrl_ltpi0_adc0_default: ltpi0-adc0-default { ++ function = "ADC0"; ++ groups = "ADC0"; ++ }; ++ ++ pinctrl_ltpi0_adc1_default: ltpi0-adc1-default { ++ function = "ADC1"; ++ groups = "ADC1"; ++ }; ++ ++ pinctrl_ltpi0_adc2_default: ltpi0-adc2-default { ++ function = "ADC2"; ++ groups = "ADC2"; ++ }; ++ ++ pinctrl_ltpi0_adc3_default: ltpi0-adc3-default { ++ function = "ADC3"; ++ groups = "ADC3"; ++ }; ++ ++ pinctrl_ltpi0_adc4_default: ltpi0-adc4-default { ++ function = "ADC4"; ++ groups = "ADC4"; ++ }; ++ ++ pinctrl_ltpi0_adc5_default: ltpi0-adc5-default { ++ function = "ADC5"; ++ groups = "ADC5"; ++ }; ++ ++ pinctrl_ltpi0_adc6_default: ltpi0-adc6-default { ++ function = "ADC6"; ++ groups = "ADC6"; ++ }; ++ ++ pinctrl_ltpi0_adc7_default: ltpi0-adc7-default { ++ function = "ADC7"; ++ groups = 
"ADC7"; ++ }; ++ ++ pinctrl_ltpi0_adc8_default: ltpi0-adc8-default { ++ function = "ADC8"; ++ groups = "ADC8"; ++ }; ++ ++ pinctrl_ltpi0_adc9_default: ltpi0-adc9-default { ++ function = "ADC9"; ++ groups = "ADC9"; ++ }; ++ ++ pinctrl_ltpi0_adc10_default: ltpi0-adc10-default { ++ function = "ADC10"; ++ groups = "ADC10"; ++ }; ++ ++ pinctrl_ltpi0_adc11_default: ltpi0-adc11-default { ++ function = "ADC11"; ++ groups = "ADC11"; ++ }; ++ ++ pinctrl_ltpi0_adc12_default: ltpi0-adc12-default { ++ function = "ADC12"; ++ groups = "ADC12"; ++ }; ++ ++ pinctrl_ltpi0_adc13_default: ltpi0-adc13-default { ++ function = "ADC13"; ++ groups = "ADC13"; ++ }; ++ ++ pinctrl_ltpi0_adc14_default: ltpi0-adc14-default { ++ function = "ADC14"; ++ groups = "ADC14"; ++ }; ++ ++ pinctrl_ltpi0_adc15_default: ltpi0-adc15-default { ++ function = "ADC15"; ++ groups = "ADC15"; ++ }; ++ ++}; +diff --git a/arch/arm64/boot/dts/aspeed/aspeed-ltpi0.dtsi b/arch/arm64/boot/dts/aspeed/aspeed-ltpi0.dtsi +--- a/arch/arm64/boot/dts/aspeed/aspeed-ltpi0.dtsi 1970-01-01 00:00:00.000000000 +0000 ++++ b/arch/arm64/boot/dts/aspeed/aspeed-ltpi0.dtsi 2025-12-23 10:16:06.862271766 +0000 +@@ -0,0 +1,659 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++#include ++#include ++ ++/ { ++ aliases { ++ i3c100 = <pi0_i3c0; ++ i3c101 = <pi0_i3c1; ++ i3c102 = <pi0_i3c2; ++ i3c103 = <pi0_i3c3; ++ i3c104 = <pi0_i3c4; ++ i3c105 = <pi0_i3c5; ++ i3c106 = <pi0_i3c6; ++ i3c107 = <pi0_i3c7; ++ i3c108 = <pi0_i3c8; ++ i3c109 = <pi0_i3c9; ++ i3c110 = <pi0_i3c10; ++ i3c111 = <pi0_i3c11; ++ i3c112 = <pi0_i3c12; ++ i3c113 = <pi0_i3c13; ++ i3c114 = <pi0_i3c14; ++ i3c115 = <pi0_i3c15; ++ }; ++ ++ ltpi0_bus: ltpi0_bus@30000000 { ++ compatible = "simple-bus"; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ranges; ++ ++ ltpi0_syscon: syscon@30c02000 { ++ compatible = "aspeed,ast1700-scu", "syscon", "simple-mfd"; ++ reg = <0x0 0x30c02000 0x0 0x1000>; ++ ranges = <0x0 0x0 0x0 0x30c02000 0x0 0x1000>; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ #clock-cells = <1>; ++ #reset-cells = <1>; ++ ++ ltpi0_rst: reset-controller@200 { ++ reg = <0x0 0x200 0x0 0x40>; ++ }; ++ ++ ltpi0_clk: clock-controller@240 { ++ reg = <0x0 0x240 0x0 0x1c0>; ++ }; ++ ++ ltpi0_pinctrl: pinctrl@400 { ++ compatible = "aspeed,ast1700-pinctrl"; ++ reg = <0x0 0x400 0x0 0x100>; ++ }; ++ }; ++ ++ ltpi0_spi2: spi@30030000 { ++ reg = <0x0 0x30030000 0x0 0x1f0>, <0x2 0x80000000 0x0 0x04000000>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ clocks = <<pi0_syscon AST1700_CLK_AHB>; ++ resets = <<pi0_syscon AST1700_RESET_SPI2>; ++ num-cs = <2>; ++ ltpi-base = <0x0 0x34000000>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_pwm_tach: pwm-tach-controller@300c0000 { ++ compatible = "aspeed,ast2700-pwm-tach"; ++ reg = <0x0 0x300c0000 0 0x100>; ++ clocks = <<pi0_syscon AST1700_CLK_AHB>; ++ resets = <<pi0_syscon AST1700_RESET_PWM>; ++ #pwm-cells = <3>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_adc0: adc@30c00000 { ++ compatible = "aspeed,ast2700-adc0"; ++ reg = <0x0 0x30c00000 0 0x100>; ++ clocks = <<pi0_syscon AST1700_CLK_AHB>; ++ resets = <<pi0_syscon AST1700_RESET_ADC>; ++ interrupts-extended = <<pi0_intc1_0 24>; ++ #io-channel-cells = <1>; ++ aspeed,scu = <<pi0_syscon>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_adc1: adc@30c00100 { ++ compatible = "aspeed,ast2700-adc1"; ++ reg = <0x0 0x30c00100 0x0 0x100>; ++ clocks = <<pi0_syscon AST1700_CLK_AHB>; ++ resets = <<pi0_syscon AST1700_RESET_ADC>; ++ interrupts-extended = <<pi0_intc1_0 24>; ++ #io-channel-cells = <1>; ++ aspeed,scu = <<pi0_syscon>; ++ status = 
"disabled"; ++ }; ++ ++ ltpi0_jtag: jtag@30c09000 { ++ compatible = "aspeed,ast2700-jtag"; ++ reg= <0x0 0x30c09000 0x0 0x40>; ++ clocks = <<pi0_syscon AST1700_CLK_AHB>; ++ resets = <<pi0_syscon AST1700_RESET_JTAG1>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i2c: bus@30c0f000 { ++ compatible = "simple-bus"; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ranges = <0x0 0x0 0x30c0f000 0x1100>; ++ }; ++ ++ ltpi0_intc1: interrupt-controller@30c18000 { ++ compatible = "simple-mfd"; ++ reg = <0 0x30c18000 0 0x400>; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ranges = <0x0 0x0 0x0 0x30c18000 0x0 0x400>; ++ ++ ltpi0_intc1_0: interrupt-controller@100 { ++ #interrupt-cells = <1>; ++ interrupt-controller; ++ compatible = "aspeed,ast2700-intc-ic"; ++ reg = <0x0 0x100 0x0 0x10>; ++ interrupts-extended = <&intc0_11 6>; ++ }; ++ ++ ltpi0_intc1_1: interrupt-controller@110 { ++ #interrupt-cells = <1>; ++ interrupt-controller; ++ compatible = "aspeed,ast2700-intc-ic"; ++ reg = <0x0 0x110 0x0 0x10>; ++ interrupts-extended = <&intc0_11 7>; ++ }; ++ }; ++ ++ ltpi0_i3c0: i3c0@30c20000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x30c20000 0x0 0x1000>; ++ interrupts-extended = <<pi0_intc1_1 0>; ++ i3c-scl-hz = <1000000>; ++ clocks = <<pi0_syscon AST1700_CLK_GATE_I3C0CLK>; ++ resets = <<pi0_syscon AST1700_RESET_I3C0>, <<pi0_syscon AST1700_RESET_I3CDMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_hvi3c0_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i3c1: i3c1@30c21000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x30c21000 0x0 0x1000>; ++ interrupts-extended = <<pi0_intc1_1 1>; ++ i3c-scl-hz = <1000000>; ++ clocks = <<pi0_syscon AST1700_CLK_GATE_I3C1CLK>; ++ resets = <<pi0_syscon AST1700_RESET_I3C1>, <<pi0_syscon AST1700_RESET_I3CDMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_hvi3c1_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i3c2: i3c2@30c22000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x30c22000 0x0 0x1000>; ++ interrupts-extended = <<pi0_intc1_1 2>; ++ i3c-scl-hz = <1000000>; ++ clocks = <<pi0_syscon AST1700_CLK_GATE_I3C2CLK>; ++ resets = <<pi0_syscon AST1700_RESET_I3C2>, <<pi0_syscon AST1700_RESET_I3CDMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_hvi3c2_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i3c3: i3c3@30c23000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x30c23000 0x0 0x1000>; ++ interrupts-extended = <<pi0_intc1_1 3>; ++ i3c-scl-hz = <1000000>; ++ clocks = <<pi0_syscon AST1700_CLK_GATE_I3C3CLK>; ++ resets = <<pi0_syscon AST1700_RESET_I3C3>, <<pi0_syscon AST1700_RESET_I3CDMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_hvi3c3_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i3c4: i3c4@30c24000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x30c24000 0x0 0x1000>; ++ interrupts-extended = <<pi0_intc1_1 4>; ++ i3c-scl-hz = <1000000>; ++ clocks = <<pi0_syscon AST1700_CLK_GATE_I3C4CLK>; ++ resets = <<pi0_syscon AST1700_RESET_I3C4>, <<pi0_syscon AST1700_RESET_I3CDMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_i3c4_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i3c5: i3c5@30c25000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x30c25000 0x0 0x1000>; ++ interrupts-extended 
= <<pi0_intc1_1 5>; ++ i3c-scl-hz = <1000000>; ++ clocks = <<pi0_syscon AST1700_CLK_GATE_I3C5CLK>; ++ resets = <<pi0_syscon AST1700_RESET_I3C5>, <<pi0_syscon AST1700_RESET_I3CDMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_i3c5_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i3c6: i3c6@30c26000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x30c26000 0x0 0x1000>; ++ interrupts-extended = <<pi0_intc1_1 6>; ++ i3c-scl-hz = <1000000>; ++ clocks = <<pi0_syscon AST1700_CLK_GATE_I3C6CLK>; ++ resets = <<pi0_syscon AST1700_RESET_I3C6>, <<pi0_syscon AST1700_RESET_I3CDMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_i3c6_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i3c7: i3c7@30c27000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x30c27000 0x0 0x1000>; ++ interrupts-extended = <<pi0_intc1_1 7>; ++ i3c-scl-hz = <1000000>; ++ clocks = <<pi0_syscon AST1700_CLK_GATE_I3C7CLK>; ++ resets = <<pi0_syscon AST1700_RESET_I3C7>, <<pi0_syscon AST1700_RESET_I3CDMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_i3c7_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i3c8: i3c8@30c28000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x30c28000 0x0 0x1000>; ++ interrupts-extended = <<pi0_intc1_1 8>; ++ i3c-scl-hz = <1000000>; ++ clocks = <<pi0_syscon AST1700_CLK_GATE_I3C8CLK>; ++ resets = <<pi0_syscon AST1700_RESET_I3C8>, <<pi0_syscon AST1700_RESET_I3CDMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_i3c8_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i3c9: i3c9@30c29000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x30c29000 0x0 0x1000>; ++ interrupts-extended = <<pi0_intc1_1 9>; ++ i3c-scl-hz = <1000000>; ++ clocks = <<pi0_syscon AST1700_CLK_GATE_I3C9CLK>; ++ resets = <<pi0_syscon AST1700_RESET_I3C9>, <<pi0_syscon AST1700_RESET_I3CDMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_i3c9_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i3c10: i3c10@30c2a000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x30c2a000 0x0 0x1000>; ++ interrupts-extended = <<pi0_intc1_1 10>; ++ i3c-scl-hz = <1000000>; ++ clocks = <<pi0_syscon AST1700_CLK_GATE_I3C10CLK>; ++ resets = <<pi0_syscon AST1700_RESET_I3C10>, <<pi0_syscon AST1700_RESET_I3CDMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_i3c10_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i3c11: i3c11@30c2b000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x30c2b000 0x0 0x1000>; ++ interrupts-extended = <<pi0_intc1_1 11>; ++ i3c-scl-hz = <1000000>; ++ clocks = <<pi0_syscon AST1700_CLK_GATE_I3C11CLK>; ++ resets = <<pi0_syscon AST1700_RESET_I3C11>, <<pi0_syscon AST1700_RESET_I3CDMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_i3c11_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i3c12: i3c12@30c2c000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x30c2c000 0x0 0x1000>; ++ interrupts-extended = <<pi0_intc1_1 12>; ++ i3c-scl-hz = <1000000>; ++ clocks = <<pi0_syscon AST1700_CLK_GATE_I3C12CLK>; ++ resets = <<pi0_syscon AST1700_RESET_I3C12>, <<pi0_syscon AST1700_RESET_I3CDMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_hvi3c12_default>; ++ status = "disabled"; ++ }; ++ ++ 
ltpi0_i3c13: i3c13@30c2d000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x30c2d000 0x0 0x1000>; ++ interrupts-extended = <<pi0_intc1_1 13>; ++ i3c-scl-hz = <1000000>; ++ clocks = <<pi0_syscon AST1700_CLK_GATE_I3C13CLK>; ++ resets = <<pi0_syscon AST1700_RESET_I3C13>, <<pi0_syscon AST1700_RESET_I3CDMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_hvi3c13_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i3c14: i3c14@30c2e000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x30c2e000 0x0 0x1000>; ++ interrupts-extended = <<pi0_intc1_1 14>; ++ i3c-scl-hz = <1000000>; ++ clocks = <<pi0_syscon AST1700_CLK_GATE_I3C14CLK>; ++ resets = <<pi0_syscon AST1700_RESET_I3C14>, <<pi0_syscon AST1700_RESET_I3CDMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_hvi3c14_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i3c15: i3c15@30c2f000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x30c2f000 0x0 0x1000>; ++ interrupts-extended = <<pi0_intc1_1 15>; ++ i3c-scl-hz = <1000000>; ++ clocks = <<pi0_syscon AST1700_CLK_GATE_I3C15CLK>; ++ resets = <<pi0_syscon AST1700_RESET_I3C15>, <<pi0_syscon AST1700_RESET_I3CDMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_hvi3c15_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_ltpi: ltpi@30c34000 { ++ compatible = "aspeed-ast1700-ltpi"; ++ reg = <0x0 0x30c34000 0x0 0x100>; ++ clocks = <<pi0_syscon AST1700_CLK_GATE_LTPICLK>, ++ <<pi0_syscon AST1700_CLK_GATE_LTPIPHYCLK>; ++ clock-names = "ahb", "phy"; ++ resets = <<pi0_syscon AST1700_RESET_LTPI>; ++ remote-controller; ++ aspeed,scu = <<pi0_syscon>; ++ i2c-tunneling = <0x0>; ++ status = "okay"; ++ }; ++ ++ ltpi0_wdt0: watchdog@30c37000 { ++ compatible = "aspeed,ast2700-wdt"; ++ reg = <0x0 0x30c37000 0x0 0x40>; ++ }; ++ }; ++}; ++ ++#include "aspeed-ltpi0-pinctrl.dtsi" ++ ++<pi0_i2c { ++ ltpi0_i2c_global: i2c-global-regs@0 { ++ compatible = "aspeed,i2c-global", "simple-mfd", "syscon"; ++ reg = <0x0 0x100>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i2c0: i2c-bus@100 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x100 0x80>, <0x1a0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <<pi0_i2c_global>; ++ aspeed,enable-dma; ++ clocks = <<pi0_syscon AST1700_CLK_APB>; ++ resets = <<pi0_syscon AST1700_RESET_I2C>; ++ interrupts-extended = <<pi0_intc1_0 0>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_i2c0_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i2c1: i2c-bus@200 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x200 0x80>, <0x2a0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <<pi0_i2c_global>; ++ aspeed,enable-dma; ++ clocks = <<pi0_syscon AST1700_CLK_APB>; ++ resets = <<pi0_syscon AST1700_RESET_I2C>; ++ interrupts-extended = <<pi0_intc1_0 1>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_i2c1_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i2c2: i2c-bus@300 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x300 0x80>, <0x3a0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <<pi0_i2c_global>; ++ aspeed,enable-dma; ++ clocks = <<pi0_syscon AST1700_CLK_APB>; ++ resets = <<pi0_syscon AST1700_RESET_I2C>; ++ interrupts-extended = <<pi0_intc1_0 2>; ++ clock-frequency = <100000>; ++ 
debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_i2c2_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i2c3: i2c-bus@400 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x400 0x80>, <0x4a0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <<pi0_i2c_global>; ++ aspeed,enable-dma; ++ clocks = <<pi0_syscon AST1700_CLK_APB>; ++ resets = <<pi0_syscon AST1700_RESET_I2C>; ++ interrupts-extended = <<pi0_intc1_0 3>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_i2c3_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i2c4: i2c-bus@500 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x500 0x80>, <0x5a0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <<pi0_i2c_global>; ++ aspeed,enable-dma; ++ clocks = <<pi0_syscon AST1700_CLK_APB>; ++ resets = <<pi0_syscon AST1700_RESET_I2C>; ++ interrupts-extended = <<pi0_intc1_0 4>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_i2c4_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i2c5: i2c-bus@600 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x600 0x80>, <0x6a0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <<pi0_i2c_global>; ++ aspeed,enable-dma; ++ clocks = <<pi0_syscon AST1700_CLK_APB>; ++ resets = <<pi0_syscon AST1700_RESET_I2C>; ++ interrupts-extended = <<pi0_intc1_0 5>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_i2c5_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i2c6: i2c-bus@700 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x700 0x80>, <0x7a0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <<pi0_i2c_global>; ++ aspeed,enable-dma; ++ clocks = <<pi0_syscon AST1700_CLK_APB>; ++ resets = <<pi0_syscon AST1700_RESET_I2C>; ++ interrupts-extended = <<pi0_intc1_0 6>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_i2c6_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i2c7: i2c-bus@800 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x800 0x80>, <0x8a0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <<pi0_i2c_global>; ++ aspeed,enable-dma; ++ clocks = <<pi0_syscon AST1700_CLK_APB>; ++ resets = <<pi0_syscon AST1700_RESET_I2C>; ++ interrupts-extended = <<pi0_intc1_0 7>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_i2c7_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i2c8: i2c-bus@900 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x900 0x80>, <0x9a0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <<pi0_i2c_global>; ++ aspeed,enable-dma; ++ clocks = <<pi0_syscon AST1700_CLK_APB>; ++ resets = <<pi0_syscon AST1700_RESET_I2C>; ++ interrupts-extended = <<pi0_intc1_0 8>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_i2c8_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i2c9: i2c-bus@a00 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0xa00 0x80>, <0xaa0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <<pi0_i2c_global>; ++ aspeed,enable-dma; ++ clocks = <<pi0_syscon AST1700_CLK_APB>; ++ resets = <<pi0_syscon AST1700_RESET_I2C>; ++ interrupts-extended = <<pi0_intc1_0 9>; ++ 
clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_i2c9_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i2c10: i2c-bus@b00 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0xb00 0x80>, <0xba0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <<pi0_i2c_global>; ++ aspeed,enable-dma; ++ clocks = <<pi0_syscon AST1700_CLK_APB>; ++ resets = <<pi0_syscon AST1700_RESET_I2C>; ++ interrupts-extended = <<pi0_intc1_0 10>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_i2c10_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i2c11: i2c-bus@c00 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0xc00 0x80>, <0xca0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <<pi0_i2c_global>; ++ aspeed,enable-dma; ++ clocks = <<pi0_syscon AST1700_CLK_APB>; ++ resets = <<pi0_syscon AST1700_RESET_I2C>; ++ interrupts-extended = <<pi0_intc1_0 11>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_i2c11_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i2c12: i2c-bus@d00 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0xd00 0x80>, <0xda0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <<pi0_i2c_global>; ++ aspeed,enable-dma; ++ clocks = <<pi0_syscon AST1700_CLK_APB>; ++ resets = <<pi0_syscon AST1700_RESET_I2C>; ++ interrupts-extended = <<pi0_intc1_0 12>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_i2c12_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i2c13: i2c-bus@e00 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0xe00 0x80>, <0xea0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <<pi0_i2c_global>; ++ aspeed,enable-dma; ++ clocks = <<pi0_syscon AST1700_CLK_APB>; ++ resets = <<pi0_syscon AST1700_RESET_I2C>; ++ interrupts-extended = <<pi0_intc1_0 13>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_i2c13_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i2c14: i2c-bus@f00 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0xf00 0x80>, <0xfa0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <<pi0_i2c_global>; ++ aspeed,enable-dma; ++ clocks = <<pi0_syscon AST1700_CLK_APB>; ++ resets = <<pi0_syscon AST1700_RESET_I2C>; ++ interrupts-extended = <<pi0_intc1_0 14>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_i2c14_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i2c15: i2c-bus@1000 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x1000 0x80>, <0x10a0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <<pi0_i2c_global>; ++ aspeed,enable-dma; ++ clocks = <<pi0_syscon AST1700_CLK_APB>; ++ resets = <<pi0_syscon AST1700_RESET_I2C>; ++ interrupts-extended = <<pi0_intc1_0 15>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_i2c15_default>; ++ status = "disabled"; ++ }; ++}; +diff --git a/arch/arm64/boot/dts/aspeed/aspeed-ltpi1-pinctrl.dtsi b/arch/arm64/boot/dts/aspeed/aspeed-ltpi1-pinctrl.dtsi +--- a/arch/arm64/boot/dts/aspeed/aspeed-ltpi1-pinctrl.dtsi 1970-01-01 00:00:00.000000000 +0000 ++++ b/arch/arm64/boot/dts/aspeed/aspeed-ltpi1-pinctrl.dtsi 2025-12-23 
10:16:06.862271766 +0000 +@@ -0,0 +1,409 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++ ++<pi1_pinctrl { ++ pinctrl_ltpi1_i2c0_default: ltpi1-i2c0-default { ++ function = "I2C0"; ++ groups = "I2C0"; ++ }; ++ ++ pinctrl_ltpi1_i2c1_default: ltpi1-i2c1-default { ++ function = "I2C1"; ++ groups = "I2C1"; ++ }; ++ ++ pinctrl_ltpi1_i2c2_default: ltpi1-i2c2-default { ++ function = "I2C2"; ++ groups = "I2C2"; ++ }; ++ ++ pinctrl_ltpi1_i2c3_default: ltpi1-i2c3-default { ++ function = "I2C3"; ++ groups = "I2C3"; ++ }; ++ ++ pinctrl_ltpi1_i2c4_default: ltpi1-i2c4-default { ++ function = "I2C4"; ++ groups = "I2C4"; ++ }; ++ ++ pinctrl_ltpi1_i2c5_default: ltpi1-i2c5-default { ++ function = "I2C5"; ++ groups = "I2C5"; ++ }; ++ ++ pinctrl_ltpi1_i2c6_default: ltpi1-i2c6-default { ++ function = "I2C6"; ++ groups = "I2C6"; ++ }; ++ ++ pinctrl_ltpi1_i2c7_default: ltpi1-i2c7-default { ++ function = "I2C7"; ++ groups = "I2C7"; ++ }; ++ ++ pinctrl_ltpi1_i2c8_default: ltpi1-i2c8-default { ++ function = "I2C8"; ++ groups = "I2C8"; ++ }; ++ ++ pinctrl_ltpi1_i2c9_default: ltpi1-i2c9-default { ++ function = "I2C9"; ++ groups = "I2C9"; ++ }; ++ ++ pinctrl_ltpi1_i2c10_default: ltpi1-i2c10-default { ++ function = "I2C10"; ++ groups = "I2C10"; ++ }; ++ ++ pinctrl_ltpi1_i2c11_default: ltpi1-i2c11-default { ++ function = "I2C11"; ++ groups = "I2C11"; ++ }; ++ ++ pinctrl_ltpi1_i2c12_default: ltpi1-i2c12-default { ++ function = "I2C12"; ++ groups = "I2C12"; ++ }; ++ ++ pinctrl_ltpi1_i2c13_default: ltpi1-i2c13-default { ++ function = "I2C13"; ++ groups = "I2C13"; ++ }; ++ ++ pinctrl_ltpi1_i2c14_default: ltpi1-i2c14-default { ++ function = "I2C14"; ++ groups = "I2C14"; ++ }; ++ ++ pinctrl_ltpi1_i2c15_default: ltpi1-i2c15-default { ++ function = "I2C15"; ++ groups = "I2C15"; ++ }; ++ ++ pinctrl_ltpi1_pwm0_default: ltpi1-pwm0-default { ++ function = "PWM0"; ++ groups = "PWM0"; ++ }; ++ ++ pinctrl_ltpi1_pwm1_default: ltpi1-pwm1-default { ++ function = "PWM1"; ++ groups = "PWM1"; ++ }; ++ ++ pinctrl_ltpi1_pwm2_default: ltpi1-pwm2-default { ++ function = "PWM2"; ++ groups = "PWM2"; ++ }; ++ ++ pinctrl_ltpi1_pwm3_default: ltpi1-pwm3-default { ++ function = "PWM3"; ++ groups = "PWM3"; ++ }; ++ ++ pinctrl_ltpi1_pwm4_default: ltpi1-pwm4-default { ++ function = "PWM4"; ++ groups = "PWM4"; ++ }; ++ ++ pinctrl_ltpi1_pwm5_default: ltpi1-pwm5-default { ++ function = "PWM5"; ++ groups = "PWM5"; ++ }; ++ ++ pinctrl_ltpi1_pwm6_default: ltpi1-pwm6-default { ++ function = "PWM6"; ++ groups = "PWM6"; ++ }; ++ ++ pinctrl_ltpi1_pwm7_default: ltpi1-pwm7-default { ++ function = "PWM7"; ++ groups = "PWM7"; ++ }; ++ ++ pinctrl_ltpi1_pwm8_default: ltpi1-pwm8-default { ++ function = "PWM8"; ++ groups = "PWM8"; ++ }; ++ ++ pinctrl_ltpi1_pwm9_default: ltpi1-pwm9-default { ++ function = "PWM9"; ++ groups = "PWM9"; ++ }; ++ ++ pinctrl_ltpi1_pwm10_default: ltpi1-pwm10-default { ++ function = "PWM10"; ++ groups = "PWM10"; ++ }; ++ ++ pinctrl_ltpi1_pwm11_default: ltpi1-pwm11-default { ++ function = "PWM11"; ++ groups = "PWM11"; ++ }; ++ ++ pinctrl_ltpi1_pwm12_default: ltpi1-pwm12-default { ++ function = "PWM12"; ++ groups = "PWM12"; ++ }; ++ ++ pinctrl_ltpi1_pwm13_default: ltpi1-pwm13-default { ++ function = "PWM13"; ++ groups = "PWM13"; ++ }; ++ ++ pinctrl_ltpi1_pwm14_default: ltpi1-pwm14-default { ++ function = "PWM14"; ++ groups = "PWM14"; ++ }; ++ ++ pinctrl_ltpi1_pwm15_default: ltpi1-pwm15-default { ++ function = "PWM15"; ++ groups = "PWM15"; ++ }; ++ ++ pinctrl_ltpi1_tach0_default: ltpi1-tach0-default { ++ function = "TACH0"; ++ groups = "TACH0"; 
++ }; ++ ++ pinctrl_ltpi1_tach1_default: ltpi1-tach1-default { ++ function = "TACH1"; ++ groups = "TACH1"; ++ }; ++ ++ pinctrl_ltpi1_tach2_default: ltpi1-tach2-default { ++ function = "TACH2"; ++ groups = "TACH2"; ++ }; ++ ++ pinctrl_ltpi1_tach3_default: ltpi1-tach3-default { ++ function = "TACH3"; ++ groups = "TACH3"; ++ }; ++ ++ pinctrl_ltpi1_tach4_default: ltpi1-tach4-default { ++ function = "TACH4"; ++ groups = "TACH4"; ++ }; ++ ++ pinctrl_ltpi1_tach5_default: ltpi1-tach5-default { ++ function = "TACH5"; ++ groups = "TACH5"; ++ }; ++ ++ pinctrl_ltpi1_tach6_default: ltpi1-tach6-default { ++ function = "TACH6"; ++ groups = "TACH6"; ++ }; ++ ++ pinctrl_ltpi1_tach7_default: ltpi1-tach7-default { ++ function = "TACH7"; ++ groups = "TACH7"; ++ }; ++ ++ pinctrl_ltpi1_tach8_default: ltpi1-tach8-default { ++ function = "TACH8"; ++ groups = "TACH8"; ++ }; ++ ++ pinctrl_ltpi1_tach9_default: ltpi1-tach9-default { ++ function = "TACH9"; ++ groups = "TACH9"; ++ }; ++ ++ pinctrl_ltpi1_tach10_default: ltpi1-tach10-default { ++ function = "TACH10"; ++ groups = "TACH10"; ++ }; ++ ++ pinctrl_ltpi1_tach11_default: ltpi1-tach11-default { ++ function = "TACH11"; ++ groups = "TACH11"; ++ }; ++ ++ pinctrl_ltpi1_tach12_default: ltpi1-tach12-default { ++ function = "TACH12"; ++ groups = "TACH12"; ++ }; ++ ++ pinctrl_ltpi1_tach13_default: ltpi1-tach13-default { ++ function = "TACH13"; ++ groups = "TACH13"; ++ }; ++ ++ pinctrl_ltpi1_tach14_default: ltpi1-tach14-default { ++ function = "TACH14"; ++ groups = "TACH14"; ++ }; ++ ++ pinctrl_ltpi1_tach15_default: ltpi1-tach15-default { ++ function = "TACH15"; ++ groups = "TACH15"; ++ }; ++ ++ pinctrl_ltpi1_jtagm_default: ltpi1-jtagm-default { ++ function = "JTAGM"; ++ groups = "JTAGM"; ++ }; ++ ++ pinctrl_ltpi1_hvi3c0_default: ltpi1-hvi3c0-default { ++ function = "I3C0"; ++ groups = "HVI3C0"; ++ }; ++ ++ pinctrl_ltpi1_hvi3c1_default: ltpi1-hvi3c1-default { ++ function = "I3C1"; ++ groups = "HVI3C1"; ++ }; ++ ++ pinctrl_ltpi1_hvi3c2_default: ltpi1-hvi3c2-default { ++ function = "I3C2"; ++ groups = "HVI3C2"; ++ }; ++ ++ pinctrl_ltpi1_hvi3c3_default: ltpi1-hvi3c3-default { ++ function = "I3C3"; ++ groups = "HVI3C3"; ++ }; ++ ++ pinctrl_ltpi1_i3c4_default: ltpi1-i3c4-default { ++ function = "I3C4"; ++ groups = "I3C4"; ++ }; ++ ++ pinctrl_ltpi1_i3c5_default: ltpi1-i3c5-default { ++ function = "I3C5"; ++ groups = "I3C5"; ++ }; ++ ++ pinctrl_ltpi1_i3c6_default: ltpi1-i3c6-default { ++ function = "I3C6"; ++ groups = "I3C6"; ++ }; ++ ++ pinctrl_ltpi1_i3c7_default: ltpi1-i3c7-default { ++ function = "I3C7"; ++ groups = "I3C7"; ++ }; ++ ++ pinctrl_ltpi1_i3c8_default: ltpi1-i3c8-default { ++ function = "I3C8"; ++ groups = "I3C8"; ++ }; ++ ++ pinctrl_ltpi1_i3c9_default: ltpi1-i3c9-default { ++ function = "I3C9"; ++ groups = "I3C9"; ++ }; ++ ++ pinctrl_ltpi1_i3c10_default: ltpi1-i3c10-default { ++ function = "I3C10"; ++ groups = "I3C10"; ++ }; ++ ++ pinctrl_ltpi1_i3c11_default: ltpi1-i3c11-default { ++ function = "I3C11"; ++ groups = "I3C11"; ++ }; ++ ++ pinctrl_ltpi1_hvi3c12_default: ltpi1-hvi3c12-default { ++ function = "I3C12"; ++ groups = "HVI3C12"; ++ }; ++ ++ pinctrl_ltpi1_hvi3c13_default: ltpi1-hvi3c13-default { ++ function = "I3C13"; ++ groups = "HVI3C13"; ++ }; ++ ++ pinctrl_ltpi1_hvi3c14_default: ltpi1-hvi3c14-default { ++ function = "I3C14"; ++ groups = "HVI3C14"; ++ }; ++ ++ pinctrl_ltpi1_hvi3c15_default: ltpi1-hvi3c15-default { ++ function = "I3C15"; ++ groups = "HVI3C15"; ++ }; ++ ++ pinctrl_ltpi1_adc0_default: ltpi1-adc0-default { ++ function = "ADC0"; ++ groups = 
"ADC0"; ++ }; ++ ++ pinctrl_ltpi1_adc1_default: ltpi1-adc1-default { ++ function = "ADC1"; ++ groups = "ADC1"; ++ }; ++ ++ pinctrl_ltpi1_adc2_default: ltpi1-adc2-default { ++ function = "ADC2"; ++ groups = "ADC2"; ++ }; ++ ++ pinctrl_ltpi1_adc3_default: ltpi1-adc3-default { ++ function = "ADC3"; ++ groups = "ADC3"; ++ }; ++ ++ pinctrl_ltpi1_adc4_default: ltpi1-adc4-default { ++ function = "ADC4"; ++ groups = "ADC4"; ++ }; ++ ++ pinctrl_ltpi1_adc5_default: ltpi1-adc5-default { ++ function = "ADC5"; ++ groups = "ADC5"; ++ }; ++ ++ pinctrl_ltpi1_adc6_default: ltpi1-adc6-default { ++ function = "ADC6"; ++ groups = "ADC6"; ++ }; ++ ++ pinctrl_ltpi1_adc7_default: ltpi1-adc7-default { ++ function = "ADC7"; ++ groups = "ADC7"; ++ }; ++ ++ pinctrl_ltpi1_adc8_default: ltpi1-adc8-default { ++ function = "ADC8"; ++ groups = "ADC8"; ++ }; ++ ++ pinctrl_ltpi1_adc9_default: ltpi1-adc9-default { ++ function = "ADC9"; ++ groups = "ADC9"; ++ }; ++ ++ pinctrl_ltpi1_adc10_default: ltpi1-adc10-default { ++ function = "ADC10"; ++ groups = "ADC10"; ++ }; ++ ++ pinctrl_ltpi1_adc11_default: ltpi1-adc11-default { ++ function = "ADC11"; ++ groups = "ADC11"; ++ }; ++ ++ pinctrl_ltpi1_adc12_default: ltpi1-adc12-default { ++ function = "ADC12"; ++ groups = "ADC12"; ++ }; ++ ++ pinctrl_ltpi1_adc13_default: ltpi1-adc13-default { ++ function = "ADC13"; ++ groups = "ADC13"; ++ }; ++ ++ pinctrl_ltpi1_adc14_default: ltpi1-adc14-default { ++ function = "ADC14"; ++ groups = "ADC14"; ++ }; ++ ++ pinctrl_ltpi1_adc15_default: ltpi1-adc15-default { ++ function = "ADC15"; ++ groups = "ADC15"; ++ }; ++ ++}; +diff --git a/arch/arm64/boot/dts/aspeed/aspeed-ltpi1.dtsi b/arch/arm64/boot/dts/aspeed/aspeed-ltpi1.dtsi +--- a/arch/arm64/boot/dts/aspeed/aspeed-ltpi1.dtsi 1970-01-01 00:00:00.000000000 +0000 ++++ b/arch/arm64/boot/dts/aspeed/aspeed-ltpi1.dtsi 2025-12-23 10:16:06.862271766 +0000 +@@ -0,0 +1,659 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++#include ++#include ++ ++/ { ++ aliases { ++ i3c200 = <pi1_i3c0; ++ i3c201 = <pi1_i3c1; ++ i3c202 = <pi1_i3c2; ++ i3c203 = <pi1_i3c3; ++ i3c204 = <pi1_i3c4; ++ i3c205 = <pi1_i3c5; ++ i3c206 = <pi1_i3c6; ++ i3c207 = <pi1_i3c7; ++ i3c208 = <pi1_i3c8; ++ i3c209 = <pi1_i3c9; ++ i3c210 = <pi1_i3c10; ++ i3c211 = <pi1_i3c11; ++ i3c212 = <pi1_i3c12; ++ i3c213 = <pi1_i3c13; ++ i3c214 = <pi1_i3c14; ++ i3c215 = <pi1_i3c15; ++ }; ++ ++ ltpi1_bus: ltpi1_bus@50000000 { ++ compatible = "simple-bus"; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ranges; ++ ++ ltpi1_syscon: syscon@50c02000 { ++ compatible = "aspeed,ast1700-scu", "syscon", "simple-mfd"; ++ reg = <0x0 0x50c02000 0x0 0x1000>; ++ ranges = <0x0 0x0 0x0 0x50c02000 0x0 0x1000>; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ #clock-cells = <1>; ++ #reset-cells = <1>; ++ ++ ltpi1_rst: reset-controller@200 { ++ reg = <0x0 0x200 0x0 0x40>; ++ }; ++ ++ ltpi1_clk: clock-controller@240 { ++ reg = <0x0 0x240 0x0 0x1c0>; ++ }; ++ ++ ltpi1_pinctrl: pinctrl@400 { ++ compatible = "aspeed,ast1700-pinctrl"; ++ reg = <0x0 0x400 0x0 0x100>; ++ }; ++ }; ++ ++ ltpi1_spi2: spi@50030000 { ++ reg = <0x0 0x50030000 0x0 0x1f0>, <0x2 0x80000000 0x0 0x04000000>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ clocks = <<pi1_syscon AST1700_CLK_AHB>; ++ resets = <<pi1_syscon AST1700_RESET_SPI2>; ++ num-cs = <2>; ++ ltpi-base = <0x0 0x54000000>; ++ status = "disabled"; ++ }; ++ ++ ltpi1_pwm_tach: pwm-tach-controller@500c0000 { ++ compatible = "aspeed,ast2700-pwm-tach"; ++ reg = <0x0 0x500c0000 0 0x100>; ++ clocks = <<pi1_syscon AST1700_CLK_AHB>; ++ resets = 
<<pi1_syscon AST1700_RESET_PWM>; ++ #pwm-cells = <3>; ++ status = "disabled"; ++ }; ++ ++ ltpi1_adc0: adc@50c00000 { ++ compatible = "aspeed,ast2700-adc0"; ++ reg = <0x0 0x50c00000 0 0x100>; ++ clocks = <<pi1_syscon AST1700_CLK_AHB>; ++ resets = <<pi1_syscon AST1700_RESET_ADC>; ++ interrupts-extended = <<pi1_intc1_0 24>; ++ #io-channel-cells = <1>; ++ aspeed,scu = <<pi1_syscon>; ++ status = "disabled"; ++ }; ++ ++ ltpi1_adc1: adc@50c00100 { ++ compatible = "aspeed,ast2700-adc1"; ++ reg = <0x0 0x50c00100 0x0 0x100>; ++ clocks = <<pi1_syscon AST1700_CLK_AHB>; ++ resets = <<pi1_syscon AST1700_RESET_ADC>; ++ interrupts-extended = <<pi1_intc1_0 24>; ++ #io-channel-cells = <1>; ++ aspeed,scu = <<pi1_syscon>; ++ status = "disabled"; ++ }; ++ ++ ltpi1_jtag: jtag@50c09000 { ++ compatible = "aspeed,ast2700-jtag"; ++ reg= <0x0 0x50c09000 0x0 0x40>; ++ clocks = <<pi1_syscon AST1700_CLK_AHB>; ++ resets = <<pi1_syscon AST1700_RESET_JTAG1>; ++ status = "disabled"; ++ }; ++ ++ ltpi1_i2c: bus@50c0f000 { ++ compatible = "simple-bus"; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ranges = <0x0 0x0 0x50c0f000 0x1100>; ++ }; ++ ++ ltpi1_intc1: interrupt-controller@50c18000 { ++ compatible = "simple-mfd"; ++ reg = <0 0x50c18000 0 0x400>; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ranges = <0x0 0x0 0x0 0x50c18000 0x0 0x400>; ++ ++ ltpi1_intc1_0: interrupt-controller@100 { ++ #interrupt-cells = <1>; ++ interrupt-controller; ++ compatible = "aspeed,ast2700-intc-ic"; ++ reg = <0x0 0x100 0x0 0x10>; ++ interrupts-extended = <&intc0_11 8>; ++ }; ++ ++ ltpi1_intc1_1: interrupt-controller@110 { ++ #interrupt-cells = <1>; ++ interrupt-controller; ++ compatible = "aspeed,ast2700-intc-ic"; ++ reg = <0x0 0x110 0x0 0x10>; ++ interrupts-extended = <&intc0_11 9>; ++ }; ++ }; ++ ++ ltpi1_i3c0: i3c0@50c20000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x50c20000 0x0 0x1000>; ++ interrupts-extended = <<pi1_intc1_1 0>; ++ i3c-scl-hz = <1000000>; ++ clocks = <<pi1_syscon AST1700_CLK_GATE_I3C0CLK>; ++ resets = <<pi1_syscon AST1700_RESET_I3C0>, <<pi1_syscon AST1700_RESET_I3CDMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi1_hvi3c0_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi1_i3c1: i3c1@50c21000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x50c21000 0x0 0x1000>; ++ interrupts-extended = <<pi1_intc1_1 1>; ++ i3c-scl-hz = <1000000>; ++ clocks = <<pi1_syscon AST1700_CLK_GATE_I3C1CLK>; ++ resets = <<pi1_syscon AST1700_RESET_I3C1>, <<pi1_syscon AST1700_RESET_I3CDMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi1_hvi3c1_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi1_i3c2: i3c2@50c22000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x50c22000 0x0 0x1000>; ++ interrupts-extended = <<pi1_intc1_1 2>; ++ i3c-scl-hz = <1000000>; ++ clocks = <<pi1_syscon AST1700_CLK_GATE_I3C2CLK>; ++ resets = <<pi1_syscon AST1700_RESET_I3C2>, <<pi1_syscon AST1700_RESET_I3CDMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi1_hvi3c2_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi1_i3c3: i3c3@50c23000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x50c23000 0x0 0x1000>; ++ interrupts-extended = <<pi1_intc1_1 3>; ++ i3c-scl-hz = <1000000>; ++ clocks = <<pi1_syscon AST1700_CLK_GATE_I3C3CLK>; ++ resets = <<pi1_syscon AST1700_RESET_I3C3>, <<pi1_syscon AST1700_RESET_I3CDMA>; ++ pinctrl-names = 
"default"; ++ pinctrl-0 = <&pinctrl_ltpi1_hvi3c3_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi1_i3c4: i3c4@50c24000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x50c24000 0x0 0x1000>; ++ interrupts-extended = <<pi1_intc1_1 4>; ++ i3c-scl-hz = <1000000>; ++ clocks = <<pi1_syscon AST1700_CLK_GATE_I3C4CLK>; ++ resets = <<pi1_syscon AST1700_RESET_I3C4>, <<pi1_syscon AST1700_RESET_I3CDMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi1_i3c4_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi1_i3c5: i3c5@50c25000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x50c25000 0x0 0x1000>; ++ interrupts-extended = <<pi1_intc1_1 5>; ++ i3c-scl-hz = <1000000>; ++ clocks = <<pi1_syscon AST1700_CLK_GATE_I3C5CLK>; ++ resets = <<pi1_syscon AST1700_RESET_I3C5>, <<pi1_syscon AST1700_RESET_I3CDMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi1_i3c5_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi1_i3c6: i3c6@50c26000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x50c26000 0x0 0x1000>; ++ interrupts-extended = <<pi1_intc1_1 6>; ++ i3c-scl-hz = <1000000>; ++ clocks = <<pi1_syscon AST1700_CLK_GATE_I3C6CLK>; ++ resets = <<pi1_syscon AST1700_RESET_I3C6>, <<pi1_syscon AST1700_RESET_I3CDMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi1_i3c6_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi1_i3c7: i3c7@50c27000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x50c27000 0x0 0x1000>; ++ interrupts-extended = <<pi1_intc1_1 7>; ++ i3c-scl-hz = <1000000>; ++ clocks = <<pi1_syscon AST1700_CLK_GATE_I3C7CLK>; ++ resets = <<pi1_syscon AST1700_RESET_I3C7>, <<pi1_syscon AST1700_RESET_I3CDMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi1_i3c7_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi1_i3c8: i3c8@50c28000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x50c28000 0x0 0x1000>; ++ interrupts-extended = <<pi1_intc1_1 8>; ++ i3c-scl-hz = <1000000>; ++ clocks = <<pi1_syscon AST1700_CLK_GATE_I3C8CLK>; ++ resets = <<pi1_syscon AST1700_RESET_I3C8>, <<pi1_syscon AST1700_RESET_I3CDMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi1_i3c8_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi1_i3c9: i3c9@50c29000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x50c29000 0x0 0x1000>; ++ interrupts-extended = <<pi1_intc1_1 9>; ++ i3c-scl-hz = <1000000>; ++ clocks = <<pi1_syscon AST1700_CLK_GATE_I3C9CLK>; ++ resets = <<pi1_syscon AST1700_RESET_I3C9>, <<pi1_syscon AST1700_RESET_I3CDMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi1_i3c9_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi1_i3c10: i3c10@50c2a000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x50c2a000 0x0 0x1000>; ++ interrupts-extended = <<pi1_intc1_1 10>; ++ i3c-scl-hz = <1000000>; ++ clocks = <<pi1_syscon AST1700_CLK_GATE_I3C10CLK>; ++ resets = <<pi1_syscon AST1700_RESET_I3C10>, <<pi1_syscon AST1700_RESET_I3CDMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi1_i3c10_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi1_i3c11: i3c11@50c2b000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x50c2b000 0x0 0x1000>; ++ interrupts-extended = <<pi1_intc1_1 11>; ++ i3c-scl-hz = 
<1000000>; ++ clocks = <&ltpi1_syscon AST1700_CLK_GATE_I3C11CLK>; ++ resets = <&ltpi1_syscon AST1700_RESET_I3C11>, <&ltpi1_syscon AST1700_RESET_I3CDMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi1_i3c11_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi1_i3c12: i3c12@50c2c000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x50c2c000 0x0 0x1000>; ++ interrupts-extended = <&ltpi1_intc1_1 12>; ++ i3c-scl-hz = <1000000>; ++ clocks = <&ltpi1_syscon AST1700_CLK_GATE_I3C12CLK>; ++ resets = <&ltpi1_syscon AST1700_RESET_I3C12>, <&ltpi1_syscon AST1700_RESET_I3CDMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi1_hvi3c12_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi1_i3c13: i3c13@50c2d000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x50c2d000 0x0 0x1000>; ++ interrupts-extended = <&ltpi1_intc1_1 13>; ++ i3c-scl-hz = <1000000>; ++ clocks = <&ltpi1_syscon AST1700_CLK_GATE_I3C13CLK>; ++ resets = <&ltpi1_syscon AST1700_RESET_I3C13>, <&ltpi1_syscon AST1700_RESET_I3CDMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi1_hvi3c13_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi1_i3c14: i3c14@50c2e000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x50c2e000 0x0 0x1000>; ++ interrupts-extended = <&ltpi1_intc1_1 14>; ++ i3c-scl-hz = <1000000>; ++ clocks = <&ltpi1_syscon AST1700_CLK_GATE_I3C14CLK>; ++ resets = <&ltpi1_syscon AST1700_RESET_I3C14>, <&ltpi1_syscon AST1700_RESET_I3CDMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi1_hvi3c14_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi1_i3c15: i3c15@50c2f000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x50c2f000 0x0 0x1000>; ++ interrupts-extended = <&ltpi1_intc1_1 15>; ++ i3c-scl-hz = <1000000>; ++ clocks = <&ltpi1_syscon AST1700_CLK_GATE_I3C15CLK>; ++ resets = <&ltpi1_syscon AST1700_RESET_I3C15>, <&ltpi1_syscon AST1700_RESET_I3CDMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi1_hvi3c15_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi1_ltpi: ltpi@50c34000 { ++ compatible = "aspeed-ast1700-ltpi"; ++ reg = <0x0 0x50c34000 0x0 0x100>; ++ clocks = <&ltpi1_syscon AST1700_CLK_GATE_LTPICLK>, ++ <&ltpi1_syscon AST1700_CLK_GATE_LTPIPHYCLK>; ++ clock-names = "ahb", "phy"; ++ resets = <&ltpi1_syscon AST1700_RESET_LTPI>; ++ remote-controller; ++ aspeed,scu = <&ltpi1_syscon>; ++ i2c-tunneling = <0x0>; ++ status = "okay"; ++ }; ++ ++ ltpi1_wdt0: watchdog@50c37000 { ++ compatible = "aspeed,ast2700-wdt"; ++ reg = <0x0 0x50c37000 0x0 0x40>; ++ }; ++ }; ++}; ++ ++#include "aspeed-ltpi1-pinctrl.dtsi" ++ ++&ltpi1_i2c { ++ ltpi1_i2c_global: i2c-global-regs@0 { ++ compatible = "aspeed,i2c-global", "simple-mfd", "syscon"; ++ reg = <0x0 0x100>; ++ status = "disabled"; ++ }; ++ ++ ltpi1_i2c0: i2c-bus@100 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x100 0x80>, <0x1a0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <&ltpi1_i2c_global>; ++ aspeed,enable-dma; ++ clocks = <&ltpi1_syscon AST1700_CLK_APB>; ++ resets = <&ltpi1_syscon AST1700_RESET_I2C>; ++ interrupts-extended = <&ltpi1_intc1_0 0>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi1_i2c0_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi1_i2c1: i2c-bus@200 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x200 0x80>, <0x2a0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ 
aspeed,global-regs = <<pi1_i2c_global>; ++ aspeed,enable-dma; ++ clocks = <<pi1_syscon AST1700_CLK_APB>; ++ resets = <<pi1_syscon AST1700_RESET_I2C>; ++ interrupts-extended = <<pi1_intc1_0 1>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi1_i2c1_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi1_i2c2: i2c-bus@300 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x300 0x80>, <0x3a0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <<pi1_i2c_global>; ++ aspeed,enable-dma; ++ clocks = <<pi1_syscon AST1700_CLK_APB>; ++ resets = <<pi1_syscon AST1700_RESET_I2C>; ++ interrupts-extended = <<pi1_intc1_0 2>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi1_i2c2_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi1_i2c3: i2c-bus@400 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x400 0x80>, <0x4a0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <<pi1_i2c_global>; ++ aspeed,enable-dma; ++ clocks = <<pi1_syscon AST1700_CLK_APB>; ++ resets = <<pi1_syscon AST1700_RESET_I2C>; ++ interrupts-extended = <<pi1_intc1_0 3>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi1_i2c3_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi1_i2c4: i2c-bus@500 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x500 0x80>, <0x5a0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <<pi1_i2c_global>; ++ aspeed,enable-dma; ++ clocks = <<pi1_syscon AST1700_CLK_APB>; ++ resets = <<pi1_syscon AST1700_RESET_I2C>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ interrupts-extended = <<pi1_intc1_0 4>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi1_i2c4_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi1_i2c5: i2c-bus@600 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x600 0x80>, <0x6a0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <<pi1_i2c_global>; ++ aspeed,enable-dma; ++ clocks = <<pi1_syscon AST1700_CLK_APB>; ++ resets = <<pi1_syscon AST1700_RESET_I2C>; ++ interrupts-extended = <<pi1_intc1_0 5>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi1_i2c5_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi1_i2c6: i2c-bus@700 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x700 0x80>, <0x7a0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <<pi1_i2c_global>; ++ aspeed,enable-dma; ++ clocks = <<pi1_syscon AST1700_CLK_APB>; ++ resets = <<pi1_syscon AST1700_RESET_I2C>; ++ interrupts-extended = <<pi1_intc1_0 6>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi1_i2c6_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi1_i2c7: i2c-bus@800 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x800 0x80>, <0x8a0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <<pi1_i2c_global>; ++ aspeed,enable-dma; ++ clocks = <<pi1_syscon AST1700_CLK_APB>; ++ resets = <<pi1_syscon AST1700_RESET_I2C>; ++ interrupts-extended = <<pi1_intc1_0 7>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi1_i2c7_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi1_i2c8: i2c-bus@900 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x900 0x80>, <0x9a0 0x20>; ++ 
compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <<pi1_i2c_global>; ++ aspeed,enable-dma; ++ clocks = <<pi1_syscon AST1700_CLK_APB>; ++ resets = <<pi1_syscon AST1700_RESET_I2C>; ++ interrupts-extended = <<pi1_intc1_0 8>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi1_i2c8_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi1_i2c9: i2c-bus@a00 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0xa00 0x80>, <0xaa0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <<pi1_i2c_global>; ++ aspeed,enable-dma; ++ clocks = <<pi1_syscon AST1700_CLK_APB>; ++ resets = <<pi1_syscon AST1700_RESET_I2C>; ++ interrupts-extended = <<pi1_intc1_0 9>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi1_i2c9_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi1_i2c10: i2c-bus@b00 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0xb00 0x80>, <0xba0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <<pi1_i2c_global>; ++ aspeed,enable-dma; ++ clocks = <<pi1_syscon AST1700_CLK_APB>; ++ resets = <<pi1_syscon AST1700_RESET_I2C>; ++ interrupts-extended = <<pi1_intc1_0 10>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi1_i2c10_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi1_i2c11: i2c-bus@c00 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0xc00 0x80>, <0xca0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <<pi1_i2c_global>; ++ aspeed,enable-dma; ++ clocks = <<pi1_syscon AST1700_CLK_APB>; ++ resets = <<pi1_syscon AST1700_RESET_I2C>; ++ interrupts-extended = <<pi1_intc1_0 11>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi1_i2c11_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi1_i2c12: i2c-bus@d00 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0xd00 0x80>, <0xda0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <<pi1_i2c_global>; ++ aspeed,enable-dma; ++ clocks = <<pi1_syscon AST1700_CLK_APB>; ++ resets = <<pi1_syscon AST1700_RESET_I2C>; ++ interrupts-extended = <<pi1_intc1_0 12>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi1_i2c12_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi1_i2c13: i2c-bus@e00 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0xe00 0x80>, <0xea0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <<pi1_i2c_global>; ++ aspeed,enable-dma; ++ clocks = <<pi1_syscon AST1700_CLK_APB>; ++ resets = <<pi1_syscon AST1700_RESET_I2C>; ++ interrupts-extended = <<pi1_intc1_0 13>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi1_i2c13_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi1_i2c14: i2c-bus@f00 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0xf00 0x80>, <0xfa0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <<pi1_i2c_global>; ++ aspeed,enable-dma; ++ clocks = <<pi1_syscon AST1700_CLK_APB>; ++ resets = <<pi1_syscon AST1700_RESET_I2C>; ++ interrupts-extended = <<pi1_intc1_0 14>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi1_i2c14_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi1_i2c15: i2c-bus@1000 { ++ #address-cells = <1>; ++ #size-cells 
= <0>; ++ reg = <0x1000 0x80>, <0x10a0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <<pi1_i2c_global>; ++ aspeed,enable-dma; ++ clocks = <<pi1_syscon AST1700_CLK_APB>; ++ resets = <<pi1_syscon AST1700_RESET_I2C>; ++ interrupts-extended = <<pi1_intc1_0 15>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi1_i2c15_default>; ++ status = "disabled"; ++ }; ++}; +diff --git a/arch/arm64/boot/dts/aspeed/aspeed-ltpi1800.dtsi b/arch/arm64/boot/dts/aspeed/aspeed-ltpi1800.dtsi +--- a/arch/arm64/boot/dts/aspeed/aspeed-ltpi1800.dtsi 1970-01-01 00:00:00.000000000 +0000 ++++ b/arch/arm64/boot/dts/aspeed/aspeed-ltpi1800.dtsi 2025-12-23 10:16:06.862271766 +0000 +@@ -0,0 +1,647 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++#include ++#include ++ ++/ { ++ aliases { ++ i3c100 = <pi0_i3c0; ++ i3c101 = <pi0_i3c1; ++ i3c102 = <pi0_i3c2; ++ i3c103 = <pi0_i3c3; ++ i3c104 = <pi0_i3c4; ++ i3c105 = <pi0_i3c5; ++ i3c106 = <pi0_i3c6; ++ i3c107 = <pi0_i3c7; ++ i3c108 = <pi0_i3c8; ++ i3c109 = <pi0_i3c9; ++ i3c110 = <pi0_i3c10; ++ i3c111 = <pi0_i3c11; ++ i3c112 = <pi0_i3c12; ++ i3c113 = <pi0_i3c13; ++ i3c114 = <pi0_i3c14; ++ i3c115 = <pi0_i3c15; ++ }; ++ ++ ltpi0_bus: ltpi0_bus@30000000 { ++ compatible = "simple-bus"; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ranges; ++ ++ ltpi0_syscon: syscon@30c02000 { ++ compatible = "aspeed,ast1800-scu", "syscon", "simple-mfd"; ++ reg = <0x0 0x30c02000 0x0 0x1000>; ++ ranges = <0x0 0x0 0x0 0x30c02000 0x0 0x1000>; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ #clock-cells = <1>; ++ #reset-cells = <1>; ++ ++ ltpi0_rst: reset-controller@200 { ++ reg = <0x0 0x200 0x0 0x40>; ++ }; ++ ++ ltpi0_clk: clock-controller@240 { ++ reg = <0x0 0x240 0x0 0x1c0>; ++ }; ++ ++ ltpi0_pinctrl: pinctrl@400 { ++ compatible = "aspeed,ast1700-pinctrl"; ++ reg = <0x0 0x400 0x0 0x100>; ++ }; ++ }; ++ ++ ltpi0_pwm_tach: pwm-tach-controller@300c0000 { ++ compatible = "aspeed,ast2700-pwm-tach"; ++ reg = <0x0 0x300c0000 0 0x100>; ++ clocks = <<pi0_syscon AST1800_CLK_AHB>; ++ resets = <<pi0_syscon AST1800_RESET_PWM>; ++ #pwm-cells = <3>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_adc0: adc@30c00000 { ++ compatible = "aspeed,ast2700-adc0"; ++ reg = <0x0 0x30c00000 0 0x100>; ++ clocks = <<pi0_syscon AST1800_CLK_AHB>; ++ resets = <<pi0_syscon AST1800_RESET_ADC>; ++ interrupts-extended = <<pi0_intc1_1 1>; ++ #io-channel-cells = <1>; ++ aspeed,scu = <<pi0_syscon>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_adc1: adc@30c00100 { ++ compatible = "aspeed,ast2700-adc1"; ++ reg = <0x0 0x30c00100 0x0 0x100>; ++ clocks = <<pi0_syscon AST1800_CLK_AHB>; ++ resets = <<pi0_syscon AST1800_RESET_ADC>; ++ interrupts-extended = <<pi0_intc1_1 1>; ++ #io-channel-cells = <1>; ++ aspeed,scu = <<pi0_syscon>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_jtag: jtag@30c09000 { ++ compatible = "aspeed,ast2700-jtag"; ++ reg= <0x0 0x30c09000 0x0 0x40>; ++ clocks = <<pi0_syscon AST1800_CLK_AHB>; ++ resets = <<pi0_syscon AST1800_RESET_JTAG>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i2c: bus@30c0f000 { ++ compatible = "simple-bus"; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ranges = <0x0 0x0 0x30c0f000 0x1100>; ++ }; ++ ++ ltpi0_intc1: interrupt-controller@30c18000 { ++ compatible = "simple-mfd"; ++ reg = <0 0x30c18000 0 0x400>; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ranges = <0x0 0x0 0x0 0x30c18000 0x0 0x400>; ++ ++ ltpi0_intc1_0: interrupt-controller@100 { ++ #interrupt-cells = <1>; ++ interrupt-controller; ++ compatible = 
"aspeed,ast2700-intc-ic"; ++ reg = <0x0 0x100 0x0 0x10>; ++ interrupts-extended = <&intc0_11 6>; ++ }; ++ ++ ltpi0_intc1_1: interrupt-controller@110 { ++ #interrupt-cells = <1>; ++ interrupt-controller; ++ compatible = "aspeed,ast2700-intc-ic"; ++ reg = <0x0 0x110 0x0 0x10>; ++ interrupts-extended = <&intc0_11 7>; ++ }; ++ }; ++ ++ ltpi0_i3c0: i3c0@30c20000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x30c20000 0x0 0x1000>; ++ interrupts-extended = <<pi0_intc1_0 16>; ++ i3c-scl-hz = <1000000>; ++ clocks = <<pi0_syscon AST1800_CLK_GATE_I3C0>; ++ resets = <<pi0_syscon AST1800_RESET_I3C0>, <<pi0_syscon AST1800_RESET_I3C_DMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_hvi3c0_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i3c1: i3c1@30c21000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x30c21000 0x0 0x1000>; ++ interrupts-extended = <<pi0_intc1_0 17>; ++ i3c-scl-hz = <1000000>; ++ clocks = <<pi0_syscon AST1800_CLK_GATE_I3C1>; ++ resets = <<pi0_syscon AST1800_RESET_I3C1>, <<pi0_syscon AST1800_RESET_I3C_DMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_hvi3c1_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i3c2: i3c2@30c22000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x30c22000 0x0 0x1000>; ++ interrupts-extended = <<pi0_intc1_0 18>; ++ i3c-scl-hz = <1000000>; ++ clocks = <<pi0_syscon AST1800_CLK_GATE_I3C2>; ++ resets = <<pi0_syscon AST1800_RESET_I3C2>, <<pi0_syscon AST1800_RESET_I3C_DMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_hvi3c2_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i3c3: i3c3@30c23000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x30c23000 0x0 0x1000>; ++ interrupts-extended = <<pi0_intc1_0 19>; ++ i3c-scl-hz = <1000000>; ++ clocks = <<pi0_syscon AST1800_CLK_GATE_I3C3>; ++ resets = <<pi0_syscon AST1800_RESET_I3C3>, <<pi0_syscon AST1800_RESET_I3C_DMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_hvi3c3_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i3c4: i3c4@30c24000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x30c24000 0x0 0x1000>; ++ interrupts-extended = <<pi0_intc1_0 20>; ++ i3c-scl-hz = <1000000>; ++ clocks = <<pi0_syscon AST1800_CLK_GATE_I3C4>; ++ resets = <<pi0_syscon AST1800_RESET_I3C4>, <<pi0_syscon AST1800_RESET_I3C_DMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_i3c4_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i3c5: i3c5@30c25000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x30c25000 0x0 0x1000>; ++ interrupts-extended = <<pi0_intc1_0 21>; ++ i3c-scl-hz = <1000000>; ++ clocks = <<pi0_syscon AST1800_CLK_GATE_I3C5>; ++ resets = <<pi0_syscon AST1800_RESET_I3C5>, <<pi0_syscon AST1800_RESET_I3C_DMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_i3c5_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i3c6: i3c6@30c26000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x30c26000 0x0 0x1000>; ++ interrupts-extended = <<pi0_intc1_0 22>; ++ i3c-scl-hz = <1000000>; ++ clocks = <<pi0_syscon AST1800_CLK_GATE_I3C6>; ++ resets = <<pi0_syscon AST1800_RESET_I3C6>, <<pi0_syscon AST1800_RESET_I3C_DMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_i3c6_default>; ++ status = 
"disabled"; ++ }; ++ ++ ltpi0_i3c7: i3c7@30c27000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x30c27000 0x0 0x1000>; ++ interrupts-extended = <<pi0_intc1_0 23>; ++ i3c-scl-hz = <1000000>; ++ clocks = <<pi0_syscon AST1800_CLK_GATE_I3C7>; ++ resets = <<pi0_syscon AST1800_RESET_I3C7>, <<pi0_syscon AST1800_RESET_I3C_DMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_i3c7_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i3c8: i3c8@30c28000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x30c28000 0x0 0x1000>; ++ interrupts-extended = <<pi0_intc1_0 24>; ++ i3c-scl-hz = <1000000>; ++ clocks = <<pi0_syscon AST1800_CLK_GATE_I3C8>; ++ resets = <<pi0_syscon AST1800_RESET_I3C8>, <<pi0_syscon AST1800_RESET_I3C_DMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_i3c8_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i3c9: i3c9@30c29000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x30c29000 0x0 0x1000>; ++ interrupts-extended = <<pi0_intc1_0 25>; ++ i3c-scl-hz = <1000000>; ++ clocks = <<pi0_syscon AST1800_CLK_GATE_I3C9>; ++ resets = <<pi0_syscon AST1800_RESET_I3C9>, <<pi0_syscon AST1800_RESET_I3C_DMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_i3c9_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i3c10: i3c10@30c2a000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x30c2a000 0x0 0x1000>; ++ interrupts-extended = <<pi0_intc1_0 26>; ++ i3c-scl-hz = <1000000>; ++ clocks = <<pi0_syscon AST1800_CLK_GATE_I3C10>; ++ resets = <<pi0_syscon AST1800_RESET_I3C10>, <<pi0_syscon AST1800_RESET_I3C_DMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_i3c10_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i3c11: i3c11@30c2b000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x30c2b000 0x0 0x1000>; ++ interrupts-extended = <<pi0_intc1_0 27>; ++ i3c-scl-hz = <1000000>; ++ clocks = <<pi0_syscon AST1800_CLK_GATE_I3C11>; ++ resets = <<pi0_syscon AST1800_RESET_I3C11>, <<pi0_syscon AST1800_RESET_I3C_DMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_i3c11_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i3c12: i3c12@30c2c000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x30c2c000 0x0 0x1000>; ++ interrupts-extended = <<pi0_intc1_0 28>; ++ i3c-scl-hz = <1000000>; ++ clocks = <<pi0_syscon AST1800_CLK_GATE_I3C12>; ++ resets = <<pi0_syscon AST1800_RESET_I3C12>, <<pi0_syscon AST1800_RESET_I3C_DMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_hvi3c12_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i3c13: i3c13@30c2d000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x30c2d000 0x0 0x1000>; ++ interrupts-extended = <<pi0_intc1_0 29>; ++ i3c-scl-hz = <1000000>; ++ clocks = <<pi0_syscon AST1800_CLK_GATE_I3C13>; ++ resets = <<pi0_syscon AST1800_RESET_I3C13>, <<pi0_syscon AST1800_RESET_I3C_DMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_hvi3c13_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i3c14: i3c14@30c2e000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x30c2e000 0x0 0x1000>; ++ interrupts-extended = <<pi0_intc1_0 30>; ++ i3c-scl-hz = <1000000>; ++ clocks = <<pi0_syscon AST1800_CLK_GATE_I3C14>; ++ 
resets = <<pi0_syscon AST1800_RESET_I3C14>, <<pi0_syscon AST1800_RESET_I3C_DMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_hvi3c14_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i3c15: i3c15@30c2f000 { ++ compatible = "aspeed-i3c-hci"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0x30c2f000 0x0 0x1000>; ++ interrupts-extended = <<pi0_intc1_0 31>; ++ i3c-scl-hz = <1000000>; ++ clocks = <<pi0_syscon AST1800_CLK_GATE_I3C15>; ++ resets = <<pi0_syscon AST1800_RESET_I3C15>, <<pi0_syscon AST1800_RESET_I3C_DMA>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_hvi3c15_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_ltpi: ltpi@30c34000 { ++ compatible = "aspeed-ltpi"; ++ reg = <0x0 0x30c34000 0x0 0x100>; ++ clocks = <<pi0_syscon AST1800_CLK_GATE_LTPI>; ++ clock-names = "ltpi"; ++ resets = <<pi0_syscon AST1800_RESET_LTPI>; ++ remote-controller; ++ aspeed,scu = <<pi0_syscon>; ++ i2c-tunneling = <0x0>; ++ status = "okay"; ++ }; ++ ++ ltpi0_wdt0: watchdog@30c37000 { ++ compatible = "aspeed,ast2700-wdt"; ++ reg = <0x0 0x30c37000 0x0 0x40>; ++ }; ++ }; ++}; ++ ++#include "aspeed-ltpi0-pinctrl.dtsi" ++ ++<pi0_i2c { ++ ltpi0_i2c_global: i2c-global-regs@0 { ++ compatible = "aspeed,i2c-global", "simple-mfd", "syscon"; ++ reg = <0x0 0x100>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i2c0: i2c-bus@100 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x100 0x80>, <0x1a0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <<pi0_i2c_global>; ++ aspeed,enable-dma; ++ clocks = <<pi0_syscon AST1800_CLK_AHB>; ++ resets = <<pi0_syscon AST1800_RESET_I2C0>; ++ interrupts-extended = <<pi0_intc1_0 0>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_i2c0_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i2c1: i2c-bus@200 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x200 0x80>, <0x2a0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <<pi0_i2c_global>; ++ aspeed,enable-dma; ++ clocks = <<pi0_syscon AST1800_CLK_AHB>; ++ resets = <<pi0_syscon AST1800_RESET_I2C1>; ++ interrupts-extended = <<pi0_intc1_0 1>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_i2c1_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i2c2: i2c-bus@300 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x300 0x80>, <0x3a0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <<pi0_i2c_global>; ++ aspeed,enable-dma; ++ clocks = <<pi0_syscon AST1800_CLK_AHB>; ++ resets = <<pi0_syscon AST1800_RESET_I2C2>; ++ interrupts-extended = <<pi0_intc1_0 2>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_i2c2_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i2c3: i2c-bus@400 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x400 0x80>, <0x4a0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <<pi0_i2c_global>; ++ aspeed,enable-dma; ++ clocks = <<pi0_syscon AST1800_CLK_AHB>; ++ resets = <<pi0_syscon AST1800_RESET_I2C3>; ++ interrupts-extended = <<pi0_intc1_0 3>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_i2c3_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i2c4: i2c-bus@500 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x500 0x80>, <0x5a0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs 
= <<pi0_i2c_global>; ++ aspeed,enable-dma; ++ clocks = <<pi0_syscon AST1800_CLK_AHB>; ++ resets = <<pi0_syscon AST1800_RESET_I2C4>; ++ interrupts-extended = <<pi0_intc1_0 4>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_i2c4_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i2c5: i2c-bus@600 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x600 0x80>, <0x6a0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <<pi0_i2c_global>; ++ aspeed,enable-dma; ++ clocks = <<pi0_syscon AST1800_CLK_AHB>; ++ resets = <<pi0_syscon AST1800_RESET_I2C5>; ++ interrupts-extended = <<pi0_intc1_0 5>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_i2c5_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i2c6: i2c-bus@700 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x700 0x80>, <0x7a0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <<pi0_i2c_global>; ++ aspeed,enable-dma; ++ clocks = <<pi0_syscon AST1800_CLK_AHB>; ++ resets = <<pi0_syscon AST1800_RESET_I2C6>; ++ interrupts-extended = <<pi0_intc1_0 6>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_i2c6_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i2c7: i2c-bus@800 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x800 0x80>, <0x8a0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <<pi0_i2c_global>; ++ aspeed,enable-dma; ++ clocks = <<pi0_syscon AST1800_CLK_AHB>; ++ resets = <<pi0_syscon AST1800_RESET_I2C7>; ++ interrupts-extended = <<pi0_intc1_0 7>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_i2c7_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i2c8: i2c-bus@900 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x900 0x80>, <0x9a0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <<pi0_i2c_global>; ++ aspeed,enable-dma; ++ clocks = <<pi0_syscon AST1800_CLK_AHB>; ++ resets = <<pi0_syscon AST1800_RESET_I2C8>; ++ interrupts-extended = <<pi0_intc1_0 8>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_i2c8_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i2c9: i2c-bus@a00 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0xa00 0x80>, <0xaa0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <<pi0_i2c_global>; ++ aspeed,enable-dma; ++ clocks = <<pi0_syscon AST1800_CLK_AHB>; ++ resets = <<pi0_syscon AST1800_RESET_I2C9>; ++ interrupts-extended = <<pi0_intc1_0 9>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_i2c9_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i2c10: i2c-bus@b00 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0xb00 0x80>, <0xba0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <<pi0_i2c_global>; ++ aspeed,enable-dma; ++ clocks = <<pi0_syscon AST1800_CLK_AHB>; ++ resets = <<pi0_syscon AST1800_RESET_I2C10>; ++ interrupts-extended = <<pi0_intc1_0 10>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_i2c10_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i2c11: i2c-bus@c00 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0xc00 0x80>, <0xca0 0x20>; ++ compatible = 
"aspeed,ast2700-i2c"; ++ aspeed,global-regs = <<pi0_i2c_global>; ++ aspeed,enable-dma; ++ clocks = <<pi0_syscon AST1800_CLK_AHB>; ++ resets = <<pi0_syscon AST1800_RESET_I2C11>; ++ interrupts-extended = <<pi0_intc1_0 11>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_i2c11_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i2c12: i2c-bus@d00 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0xd00 0x80>, <0xda0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <<pi0_i2c_global>; ++ aspeed,enable-dma; ++ clocks = <<pi0_syscon AST1800_CLK_AHB>; ++ resets = <<pi0_syscon AST1800_RESET_I2C12>; ++ interrupts-extended = <<pi0_intc1_0 12>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_i2c12_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i2c13: i2c-bus@e00 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0xe00 0x80>, <0xea0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <<pi0_i2c_global>; ++ aspeed,enable-dma; ++ clocks = <<pi0_syscon AST1800_CLK_AHB>; ++ resets = <<pi0_syscon AST1800_RESET_I2C13>; ++ interrupts-extended = <<pi0_intc1_0 13>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_i2c13_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i2c14: i2c-bus@f00 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0xf00 0x80>, <0xfa0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <<pi0_i2c_global>; ++ aspeed,enable-dma; ++ clocks = <<pi0_syscon AST1800_CLK_AHB>; ++ resets = <<pi0_syscon AST1800_RESET_I2C14>; ++ interrupts-extended = <<pi0_intc1_0 14>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_i2c14_default>; ++ status = "disabled"; ++ }; ++ ++ ltpi0_i2c15: i2c-bus@1000 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x1000 0x80>, <0x10a0 0x20>; ++ compatible = "aspeed,ast2700-i2c"; ++ aspeed,global-regs = <<pi0_i2c_global>; ++ aspeed,enable-dma; ++ clocks = <<pi0_syscon AST1800_CLK_AHB>; ++ resets = <<pi0_syscon AST1800_RESET_I2C15>; ++ interrupts-extended = <<pi0_intc1_0 15>; ++ clock-frequency = <100000>; ++ debounce-level = <2>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_i2c15_default>; ++ status = "disabled"; ++ }; ++}; +diff --git a/arch/arm64/boot/dts/aspeed/ast2700-ci-host.dts b/arch/arm64/boot/dts/aspeed/ast2700-ci-host.dts +--- a/arch/arm64/boot/dts/aspeed/ast2700-ci-host.dts 1970-01-01 00:00:00.000000000 +0000 ++++ b/arch/arm64/boot/dts/aspeed/ast2700-ci-host.dts 2025-12-23 10:16:06.862271766 +0000 +@@ -0,0 +1,97 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++ ++/dts-v1/; ++ ++#include "ast2700-evb.dts" ++ ++/ { ++ model = "AST2700-CI-HOST"; ++}; ++ ++&bmc_dev0 { ++ status = "disabled"; ++}; ++ ++&xdma0 { ++ status = "disabled"; ++}; ++ ++&pcie_vuart0 { ++ status = "disabled"; ++}; ++ ++&pcie_vuart1 { ++ status = "disabled"; ++}; ++ ++&pcie_lpc0_kcs0 { ++ status = "disabled"; ++}; ++ ++&pcie_lpc0_kcs1 { ++ status = "disabled"; ++}; ++ ++&pcie_lpc0_kcs2 { ++ status = "disabled"; ++}; ++ ++&pcie_lpc0_kcs3 { ++ status = "disabled"; ++}; ++ ++&pcie_lpc0_ibt { ++ status = "disabled"; ++}; ++ ++&pcie0_mmbi0 { ++ status = "disabled"; ++}; ++ ++&pcie0 { ++ status = "okay"; ++}; ++ ++&bmc_dev1 { ++ status = "disabled"; ++}; ++ ++&xdma1 { ++ status = "disabled"; ++}; ++ ++&pcie_vuart2 { ++ 
status = "disabled"; ++}; ++ ++&pcie_vuart3 { ++ status = "disabled"; ++}; ++ ++&pcie_lpc1_kcs0 { ++ status = "disabled"; ++}; ++ ++&pcie_lpc1_kcs1 { ++ status = "disabled"; ++}; ++ ++&pcie_lpc1_kcs2 { ++ status = "disabled"; ++}; ++ ++&pcie_lpc1_kcs3 { ++ status = "disabled"; ++}; ++ ++&pcie_lpc1_ibt { ++ status = "disabled"; ++}; ++ ++&pcie1_mmbi4 { ++ status = "disabled"; ++}; ++ ++&pcie1 { ++ status = "okay"; ++}; +diff --git a/arch/arm64/boot/dts/aspeed/ast2700-dcscm.dts b/arch/arm64/boot/dts/aspeed/ast2700-dcscm.dts +--- a/arch/arm64/boot/dts/aspeed/ast2700-dcscm.dts 1970-01-01 00:00:00.000000000 +0000 ++++ b/arch/arm64/boot/dts/aspeed/ast2700-dcscm.dts 2025-12-23 10:16:06.862271766 +0000 +@@ -0,0 +1,978 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++ ++/dts-v1/; ++ ++#include "aspeed-g7.dtsi" ++#include ++#include ++ ++#define PCIE0_EP 1 // 1: EP, 0: RC ++#define PCIE1_EP 1 // 1: EP, 0: RC ++ ++/ { ++ model = "AST2700-DCSCM"; ++ compatible = "aspeed,ast2700"; ++ ++ chosen { ++ stdout-path = "serial12:115200n8"; ++ }; ++ ++ memory@400000000 { ++ device_type = "memory"; ++ reg = <0x4 0x00000000 0x0 0x40000000>; ++ }; ++ ++ reserved-memory { ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ranges; ++ ++ #include "ast2700-reserved-mem.dtsi" ++ ++ video_engine_memory0: video0 { ++ size = <0x0 0x02000000>; ++ alignment = <0x0 0x00010000>; ++ compatible = "shared-dma-pool"; ++ reusable; ++ }; ++ ++ video_engine_memory1: video1{ ++ size = <0x0 0x02000000>; ++ alignment = <0x0 0x00010000>; ++ compatible = "shared-dma-pool"; ++ reusable; ++ }; ++ ++#if 0 ++ gfx_memory: framebuffer { ++ size = <0x0 0x01000000>; ++ alignment = <0x0 0x01000000>; ++ compatible = "shared-dma-pool"; ++ reusable; ++ }; ++#endif ++ }; ++ ++ iio-hwmon { ++ compatible = "iio-hwmon"; ++ io-channels = <&adc0 7>, <&adc1 7>; ++ }; ++}; ++ ++&pwm_tach { ++ status = "okay"; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_pwm9_default>; ++}; ++ ++&adc0 { ++ aspeed,int-vref-microvolt = <2500000>; ++ status = "okay"; ++ aspeed,battery-sensing; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_adc7_default>; ++}; ++ ++&adc1 { ++ aspeed,int-vref-microvolt = <2500000>; ++ status = "okay"; ++ aspeed,battery-sensing; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_adc15_default>; ++}; ++ ++&pinctrl1 { ++ pinctrl_i3c0_3_hv_voltage: i3chv-voltage { ++ pins = "U25"; ++ power-source = <1800>; ++ }; ++ ++ pinctrl_i3c0_driving: i3c0-driving { ++ pins = "U25", "U26"; ++ drive-strength = <2>; ++ }; ++ ++ pinctrl_i3c1_driving: i3c1-driving { ++ pins = "Y26", "AA24"; ++ drive-strength = <2>; ++ }; ++ ++ pinctrl_i3c2_driving: i3c2-driving { ++ pins = "R25", "AA26"; ++ drive-strength = <2>; ++ }; ++ ++ pinctrl_i3c3_driving: i3c3-driving { ++ pins = "R26", "Y25"; ++ drive-strength = <2>; ++ }; ++ ++ pinctrl_rgmii0_driving: rgmii0-driving { ++ pins = "C20", "C19", "A8", "R14", "A7", "P14", ++ "D20", "A6", "B6", "N14", "B7", "B8"; ++ drive-strength = <1>; ++ }; ++}; ++ ++&i3c0 { ++ /* BMC_HPM_I3C_I2C_14, If AST1060 I3C_BMC_PFR_SCM_SEL(GPION4)=0 and I3C_SCM_EN(GPION5)=0 */ ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c1 { ++ /* BMC_I2C_I3C1_SCL1 */ ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c2 { ++ /* BMC_I2C_I3C2_SCL2 */ ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c3 { ++ /* I3C_DBG_SCM */ ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++/* AST2700 i3c4 -> AST1060 i3c2 for MCTP over I3C. 
*/ ++&i3c4 { ++ /* I3C_PFR_BMC */ ++ initial-role = "target"; ++ pid = <0x000007ec 0x06000000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c5 { ++ /* I3C_MNG_BMC_SCM */ ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c6 { ++ /* I3C_SPD_SCM */ ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++/* Enable UART2 and UART9 for obmc-console. */ ++&uart2 { ++ /delete-property/ pinctrl-names; ++ /delete-property/ pinctrl-0; ++ status = "okay"; ++}; ++ ++&uart9 { ++ /delete-property/ pinctrl-names; ++ /delete-property/ pinctrl-0; ++ status = "okay"; ++}; ++ ++/* Enable UART7 and UART10 for obmc-console. */ ++&uart7 { ++ /delete-property/ pinctrl-names; ++ /delete-property/ pinctrl-0; ++ status = "okay"; ++}; ++ ++&uart10 { ++ /delete-property/ pinctrl-names; ++ /delete-property/ pinctrl-0; ++ status = "okay"; ++}; ++ ++/* UART3, UART13 and UART14 will be tunnelded to LTPI UART channels. */ ++&uart3 { ++ /delete-property/ pinctrl-names; ++ /delete-property/ pinctrl-0; ++ status = "okay"; ++}; ++ ++&uart13 { ++ status = "okay"; ++}; ++ ++&uart14 { ++ status = "okay"; ++}; ++ ++&uart12 { ++ status = "okay"; ++}; ++ ++&fmc { ++ status = "okay"; ++ pinctrl-0 = <&pinctrl_fwspi_quad_default>; ++ pinctrl-names = "default"; ++ ++ flash@0 { ++ status = "okay"; ++ m25p,fast-read; ++ label = "bmc"; ++ spi-max-frequency = <12500000>; ++ spi-tx-bus-width = <4>; ++ spi-rx-bus-width = <4>; ++ partitions { ++ compatible = "fixed-partitions"; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ u-boot@0 { ++ reg = <0x0 0x400000>; // 4MB ++ label = "u-boot"; ++ }; ++ u-boot-env@400000 { ++ reg = <0x400000 0x20000>; // 128KB ++ label = "u-boot-env"; ++ }; ++ kernel@420000 { ++ reg = <0x420000 0x900000>; // 9MB ++ label = "kernel"; ++ }; ++ rofs@d20000 { ++ reg = <0xd20000 0x24a0000>; // 36.625MB ++ label = "rofs"; ++ }; ++ rwfs@31c0000 { ++ reg = <0x31c0000 0xE40000>; // 14.25MB ++ label = "rwfs"; ++ }; ++ pfm@4000000 { ++ reg = <0x4000000 0x20000>; // 128KB ++ label = "pfm"; ++ }; ++ reserved-1@4020000 { ++ reg = <0x4020000 0x200000>; // 128KB ++ label = "reserved-1"; ++ }; ++ rc-image@4220000 { ++ reg = <0x4220000 0x3de0000>; // 63360KB ++ label = "rc-image"; ++ }; ++ image-stg@8000000 { ++ reg = <0x8000000 0x3de0000>; // 63360KB ++ label = "img-stg"; ++ }; ++ pfr-stg@bde0000 { ++ reg = <0xbde0000 0x100000>; // 1024KB ++ label = "pfr-stg"; ++ }; ++ cpld-stg@bee0000 { ++ reg = <0xbee0000 0x400000>; // 4096KB ++ label = "cpld-stg"; ++ }; ++ afm-stg@c2e0000 { ++ reg = <0xc2e0000 0x20000>; // 128KB ++ label = "afm-stg"; ++ }; ++ afm-rc@c300000 { ++ reg = <0xc300000 0x20000>; // 128KB ++ label = "afm-rc"; ++ }; ++ reserved-2@c320000 { ++ reg = <0xc320000 0x3ce0000>; // 62336KB ++ label = "reserved-2"; ++ }; ++ }; ++ }; ++}; ++ ++&spi0 { ++ status = "okay"; ++ pinctrl-0 = <&pinctrl_spi0_default &pinctrl_spi0_cs1_default>; ++ pinctrl-names = "default"; ++ ++ flash@0 { ++ status = "okay"; ++ m25p,fast-read; ++ label = "spi0:0"; ++ spi-max-frequency = <12500000>; ++ spi-tx-bus-width = <1>; ++ spi-rx-bus-width = <1>; ++ partitions { ++ compatible = "fixed-partitions"; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ spi0_pch_bios@0 { ++ reg = <0x0 0x3fe0000>; ++ label = "spi0_pch_reserved"; ++ }; ++ spi0_pch_pfm@3fe0000 { ++ reg = <0x3fe0000 0x20000>; ++ label = "spi0_pch_pfm"; ++ }; ++ spi0_pch_stg@4000000 { ++ reg = <0x4000000 0x2000000>; ++ label = "spi0_pch_stg"; ++ }; ++ spi0_pch_rc@6000000 { ++ reg = <0x6000000 0x2000000>; ++ label = "spi0_pch_rc"; ++ }; ++ }; ++ }; ++ ++ flash@1 { ++ 
status = "okay"; ++ m25p,fast-read; ++ label = "spi0:1"; ++ spi-max-frequency = <12500000>; ++ spi-tx-bus-width = <1>; ++ spi-rx-bus-width = <1>; ++ }; ++}; ++ ++&spi1 { ++ status = "okay"; ++ pinctrl-0 = <&pinctrl_spi1_default &pinctrl_spi1_cs1_default>; ++ pinctrl-names = "default"; ++ ++ flash@0 { ++ status = "okay"; ++ m25p,fast-read; ++ label = "spi1:0"; ++ spi-max-frequency = <12500000>; ++ spi-tx-bus-width = <1>; ++ spi-rx-bus-width = <1>; ++ partitions { ++ compatible = "fixed-partitions"; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ++ spi1_pch_reserved@0 { ++ reg = <0x0 0x7f0000>; ++ label = "spi1_pch_reserved"; ++ }; ++ ++ spi1_pch_stg@7f0000 { ++ reg = <0x7f0000 0x1400000>; ++ label = "spi1_pch_stg"; ++ }; ++ ++ spi1_pch_rc@1bf0000 { ++ reg = <0x1bf0000 0x1400000>; ++ label = "spi1_pch_rc"; ++ }; ++ ++ spi1_pch_pfm@2ff0000 { ++ reg = <0x2ff0000 0x10000>; ++ label = "spi1_pch_pfm"; ++ }; ++ ++ spi1_pch_bios@3000000 { ++ reg = <0x3000000 0x1000000>; ++ label = "spi1_pch_bios"; ++ }; ++ }; ++ }; ++ ++ flash@1 { ++ status = "okay"; ++ m25p,fast-read; ++ label = "spi1:1"; ++ spi-max-frequency = <12500000>; ++ spi-tx-bus-width = <1>; ++ spi-rx-bus-width = <1>; ++ }; ++}; ++ ++<pi0 { ++ status = "okay"; ++}; ++ ++<pi1 { ++ /* Do not tunnel I2C10 to the HPM */ ++ i2c-tunneling = <0x2f>; ++ status = "okay"; ++}; ++ ++/* The LTPI GPIO table is defined in Chapter 6 of the AST2700 LTPI Design Guide v10.pdf. */ ++/* line 0:BMC_GPI0(LTPI0_INL16), 1:BMC_GPO0(LTPI0_ONL16), 2:BMC_GPI1(LTPI0_INL17), 3:BMC_GPO1(LTPI0_ONL17), etc... */ ++/* BMC_GPI[63:0] = LTPI0_INL[79:16], BMC_GPO[63:0] = LTPI0_ONL[79:16] */ ++/* BMC_GPI[71:64] = LTPI0_ILL[11:4], BMC_GPO[71:64] = LTPI0_OLL[11:4] */ ++/* BMC_GPI[111:72] = LTPI0_INL[127:88],BMC_GPO[111:72] = LTPI0_ONL[127:88] */ ++<pi0_gpio { ++ status = "okay"; ++ gpio-line-names = ++ /*00-07*/ "","","","","","","","", ++ /*08-15*/ "","","","","","","","", ++ /*16-23*/ "","FM_CPU_FBRK_DEBUG_N","","FM_BMC_TRUST_N","","FM_RST_BTN_OUT_CPU0_PLD_N_OE","","FM_PWR_BTN_OUT_CPU0_N", ++ /*24-31*/ "","FM_BMC_ONCTL_N","","","","","","", ++ /*32-39*/ "","BIOS_POST_CODE_LED_0","","BIOS_POST_CODE_LED_1","","BIOS_POST_CODE_LED_2","","BIOS_POST_CODE_LED_3", ++ /*40-47*/ "FP_ID_BTN_N","BIOS_POST_CODE_LED_4","FP_RST_BTN_N","BIOS_POST_CODE_LED_5","","BIOS_POST_CODE_LED_6","","BIOS_POST_CODE_LED_7", ++ /*48-55*/ "","A_P3V_BAT_SCALED_EN","","FM_TPM_EN_PULSE","","FM_SKT0_FAULT_LED","","FM_SKT1_FAULT_LED", ++ /*56-63*/ "","","","","","RST_BMC_SMB_PCIE_MUX_N","","SURPRISE_RESET", ++ /*64-71*/ "PWRGD_S0_PWROK_CPU0","","","","","","","", ++ /*72-79*/ "","","","","","","","", ++ /*80-87*/ "","","","","","","","", ++ /*88-95*/ "","","","","","","","", ++ /*96-103*/ "","","","","","","","", ++ /*104-111*/ "","","","","","","","", ++ /*112-119*/ "","","","","","","","", ++ /*120-127*/ "","","","","","","","", ++ /*128-135*/ "","","","","","","","", ++ /*136-143*/ "","","","","","","","", ++ /*144-151*/ "","","","","","","","", ++ /*152-159*/ "","","","","","","","", ++ /*160-167*/ "","","","","","","","", ++ /*168-175*/ "","","","","","","","", ++ /*176-183*/ "","","","","","","","", ++ /*184-191*/ "","","","","","","","", ++ /*192-199*/ "","","","","","","","", ++ /*200-207*/ "","","","","","","","", ++ /*208-215*/ "","","","","","","","", ++ /*216-223*/ "","","","","","","",""; ++ ++ gpio_17 { ++ gpio-hog; ++ gpios = <17 GPIO_ACTIVE_HIGH>; ++ output-high; ++ line-name = "FM_CPU_FBRK_DEBUG_N"; ++ }; ++ gpio_19 { ++ gpio-hog; ++ gpios = <19 GPIO_ACTIVE_HIGH>; ++ output-high; ++ line-name = 
"FM_BMC_TRUST_N"; ++ }; ++ gpio_25 { ++ gpio-hog; ++ gpios = <25 GPIO_ACTIVE_HIGH>; ++ output-low; ++ line-name = "FM_BMC_ONCTL_N"; ++ }; ++ gpio_53 { ++ gpio-hog; ++ gpios = <53 GPIO_ACTIVE_HIGH>; ++ output-high; ++ line-name = "FM_SKT0_FAULT_LED"; ++ }; ++ gpio_61 { ++ gpio-hog; ++ gpios = <61 GPIO_ACTIVE_HIGH>; ++ output-high; ++ line-name = "RST_BMC_SMB_PCIE_MUX_N"; ++ }; ++ gpio_63 { ++ gpio-hog; ++ gpios = <63 GPIO_ACTIVE_HIGH>; ++ output-low; ++ line-name = "SURPRISE_RESET"; ++ }; ++}; ++ ++&peci0 { ++ status = "okay"; ++}; ++ ++&chassis { ++ status = "okay"; ++}; ++ ++&mdio0 { ++ status = "okay"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ ethphy0: ethernet-phy@0 { ++ compatible = "ethernet-phy-ieee802.3-c22"; ++ reg = <0>; ++ }; ++}; ++ ++&mac0 { ++ status = "okay"; ++ ++ phy-mode = "rgmii-id"; ++ phy-handle = <ðphy0>; ++ ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_rgmii0_default &pinctrl_rgmii0_driving>; ++}; ++ ++&syscon1 { ++ assigned-clocks = <&syscon1 SCU1_CLK_MACHCLK>, ++ <&syscon1 SCU1_CLK_RGMII>, ++ <&syscon1 SCU1_CLK_RMII>; ++ assigned-clock-rates = <200000000>, <125000000>, <50000000>; ++}; ++ ++&jtag1 { ++ status = "okay"; ++}; ++ ++&gpio1 { ++ pinctrl-0 = <&pinctrl_i3c0_3_hv_voltage ++ &pinctrl_i3c0_driving &pinctrl_i3c1_driving ++ &pinctrl_i3c2_driving &pinctrl_i3c3_driving>; ++ pinctrl-names = "default"; ++ ++ gpio-line-names = ++ /*A0-A7*/ "","","","","","","","", ++ /*B0-B7*/ "","","","","","","","", ++ /*C0-C7*/ "","","","","","","","", ++ /*D0-D7*/ "","","","","","","","", ++ /*E0-E7*/ "","","","","FP_LED_STATUS_GREEN_CPLD_N","FP_LED_STATUS_AMBER_CPLD_N","","", ++ /*F0-F7*/ "","","","","","","","", ++ /*G0-G7*/ "","","","","","","","", ++ /*H0-H7*/ "","","","","","","","", ++ /*I0-I7*/ "","","","","","","","", ++ /*J0-J7*/ "","","","","","","","", ++ /*K0-K7*/ "","","","","","","","", ++ /*L0-L7*/ "","","","","","","","", ++ /*M0-M7*/ "","","","","","","","", ++ /*N0-N7*/ "","","","","","","","", ++ /*O0-O7*/ "","","","","","","","", ++ /*P0-P7*/ "","","","","","","","", ++ /*Q0-Q7*/ "","","","","","","","", ++ /*R0-R7*/ "","","","","","","","", ++ /*S0-S7*/ "","","","","","","","", ++ /*T0-T7*/ "","","","","","","","", ++ /*U0-U7*/ "","","","","","SCM_PHY_RST","","", ++ /*V0-V7*/ "","","","","","","","", ++ /*W0-W7*/ "","","","","","","","", ++ /*X0-X7*/ "","","","","","","","", ++ /*Y0-Y7*/ "","","","","IRQ_PMBUS1_ALERT_LVC3_N","FM_NVME_LVC3_ALERT_N","","", ++ /*Z0-Z7*/ "","","FM_NODE_ID0_N","FM_NODE_ID1_N","","PWRGD_AUX_PWRGD_PFR_CPU0","PWRGD_AUX_PWRGD_PFR_CPU1","", ++ /*AA0-AA7*/ "BMC_BOOT_DONE","","","","","","","", ++ /*AB0-AB7*/ "","","","","","","","", ++ /*AC0-AC7*/ "","","","","","","","", ++ /*AD0-AD7*/ "","","","","","","","", ++ /*AE0-AE7*/ "","","","","","","",""; ++}; ++ ++/* AST2700 A1 support SGPIO slave to 72*2 pins. */ ++/* The SGPIO slave table is defined in Chapter 7 of the AST2700 LTPI Design Guide v10.pdf. */ ++/* line 0:BMC_SGPI0(SCM_GPI0), 1:BMC_SGPO0(SCM_GPO0), 2:BMC_SGPI1(SCM_GPI1), 3:BMC_SGPO1(SCM_GPO1), etc... */ ++/* line 32:BMC_SGPI16(LTPI0_INL80), 33:BMC_SGPO16(LTPI0_ONL80), 34:LTPI0_INL81, 35:LTPI0_ONL81, etc... 
*/ ++/* BMC_SGPI[15:0] = SCM_GPI0[15:0], BMC_SGPO[15:0] = BMC_SGPO0[15:0] */ ++/* BMC_SGPI[23:16] = LTPI0_INL[87:80], BMC_SGPO[23:16] = LTPI0_ONL[87:80] */ ++/* BMC_SGPI[31:24] = LTPI1_INL[87:80], BMC_SGPO[31:24] = LTPI1_ONL[87:80] */ ++/* BMC_SGPI[47:32] = LTPI0_INL[15:0], BMC_SGPO[47:32] = LTPI0_ONL[15:0] */ ++/* BMC_SGPI[63:48] = LTPI1_INL[15:0], BMC_SGPO[63:48] = LTPI1_ONL[15:0] */ ++/* BMC_SGPI[71:64] = SREG_GPO[7:0], BMC_SGPO[71:64] = SREG_GPI[7:0] */ ++/* The designe guide only define SCM_GPO9(FP_PWR_BTN_PFR_N) for PFR output pin. */ ++/* We use AST2700 SGPIOS line 18 (FP_PWR_BTN_PFR_N_BMC_IN) for x86-power-control power button input. */ ++&sgpios { ++ status = "okay"; ++ gpio-line-names = ++ /*00-07*/ "","","","","","","","", ++ /*08-15*/ "","","","","","","","", ++ /*16-23*/ "","","FP_PWR_BTN_PFR_N_BMC_IN","","","","","", ++ /*24-31*/ "","","","","","","","", ++ /*32-39*/ "","","","","","","","", ++ /*40-47*/ "","","","","","","","", ++ /*48-55*/ "","","","","","","","", ++ /*56-63*/ "","","","","","","","", ++ /*64-71*/ "","","","","","","","", ++ /*72-79*/ "","","","","","","","", ++ /*80-87*/ "","","","","","","","", ++ /*88-95*/ "","","","","","","","", ++ /*96-103*/ "","","","","","","","", ++ /*104-111*/ "","","","","","","","", ++ /*112-119*/ "","","","","","","","", ++ /*120-127*/ "","","","","","","","", ++ /*128-135*/ "","","","","","","","", ++ /*136-143*/ "","","","","","","",""; ++}; ++ ++&espi0 { ++ status = "okay"; ++ perif-dma-mode; ++ perif-mmbi-enable; ++ perif-mmbi-src-addr = <0x0 0xa8000000>; ++ perif-mmbi-tgt-memory = <&espi0_mmbi_memory>; ++ perif-mmbi-instance-num = <0x1>; ++ perif-mcyc-enable; ++ perif-mcyc-src-addr = <0x0 0x98000000>; ++ perif-mcyc-size = <0x0 0x10000>; ++ oob-dma-mode; ++ flash-dma-mode; ++}; ++ ++#if 0 //eSPI1 and SD are multi-functional pin, SD default on ++&espi1 { ++ status = "okay"; ++ perif-dma-mode; ++ perif-mmbi-enable; ++ perif-mmbi-src-addr = <0x0 0xa8000000>; ++ perif-mmbi-tgt-memory = <&espi1_mmbi_memory>; ++ perif-mmbi-instance-num = <0x1>; ++ perif-mcyc-enable; ++ perif-mcyc-src-addr = <0x0 0x98000000>; ++ perif-mcyc-size = <0x0 0x10000>; ++ oob-dma-mode; ++ flash-dma-mode; ++}; ++#endif ++ ++&lpc0_kcs0 { ++ status = "okay"; ++ kcs-io-addr = <0xca0>; ++ kcs-channel = <0>; ++}; ++ ++&lpc0_kcs1 { ++ status = "okay"; ++ kcs-io-addr = <0xca8>; ++ kcs-channel = <1>; ++}; ++ ++&lpc0_kcs2 { ++ status = "okay"; ++ kcs-io-addr = <0xca2>; ++ kcs-channel = <2>; ++}; ++ ++&lpc0_kcs3 { ++ status = "okay"; ++ kcs-io-addr = <0xca4>; ++ kcs-channel = <3>; ++}; ++ ++&lpc0_ibt { ++ status = "okay"; ++}; ++ ++&lpc0_mbox { ++ status = "okay"; ++}; ++ ++&lpc0_snoop { ++ status = "okay"; ++ snoop-ports = <0x80>, <0x81>; ++}; ++ ++&lpc0_uart_routing { ++ status = "okay"; ++}; ++ ++&lpc1_kcs0 { ++ status = "okay"; ++ kcs-io-addr = <0xca0>; ++ kcs-channel = <4>; ++}; ++ ++&lpc1_kcs1 { ++ status = "okay"; ++ kcs-io-addr = <0xca8>; ++ kcs-channel = <5>; ++}; ++ ++&lpc1_kcs2 { ++ status = "okay"; ++ kcs-io-addr = <0xca2>; ++ kcs-channel = <6>; ++}; ++ ++&lpc1_kcs3 { ++ status = "okay"; ++ kcs-io-addr = <0xca4>; ++ kcs-channel = <7>; ++}; ++ ++&lpc1_ibt { ++ status = "okay"; ++}; ++ ++&lpc1_mbox { ++ status = "okay"; ++}; ++ ++&lpc1_snoop { ++ status = "okay"; ++ snoop-ports = <0x80>, <0x81>; ++}; ++ ++&lpc1_uart_routing { ++ status = "okay"; ++}; ++ ++&video0 { ++ status = "okay"; ++ memory-region = <&video_engine_memory0>; ++}; ++ ++&video1 { ++ status = "okay"; ++ memory-region = <&video_engine_memory1>; ++}; ++ ++&disp_intf { ++ status = "okay"; 
++}; ++ ++&rtc { ++ status = "okay"; ++}; ++ ++&rsss { ++ status = "okay"; ++}; ++ ++&ecdsa { ++ status = "okay"; ++}; ++ ++&hace { ++ status = "okay"; ++}; ++ ++&bmc_dev0 { ++ status = "okay"; ++ memory-region = <&bmc_dev0_memory>; ++}; ++ ++&xdma0 { ++ status = "okay"; ++ memory-region = <&xdma_memory0>; ++}; ++ ++&pcie_vuart0 { ++ port = <0x3f8>; ++ sirq = <4>; ++ sirq-polarity = <0>; ++ ++ status = "okay"; ++}; ++ ++&pcie_vuart1 { ++ port = <0x2f8>; ++ sirq = <3>; ++ sirq-polarity = <0>; ++ ++ status = "okay"; ++}; ++ ++&pcie_lpc0_kcs0 { ++ status = "okay"; ++ kcs-io-addr = <0x3a0>; ++ kcs-channel = <8>; ++}; ++ ++&pcie_lpc0_kcs1 { ++ status = "okay"; ++ kcs-io-addr = <0x3a8>; ++ kcs-channel = <9>; ++}; ++ ++&pcie_lpc0_kcs2 { ++ status = "okay"; ++ kcs-io-addr = <0x3a2>; ++ kcs-channel = <10>; ++}; ++ ++&pcie_lpc0_kcs3 { ++ status = "okay"; ++ kcs-io-addr = <0x3a4>; ++ kcs-channel = <11>; ++}; ++ ++&pcie_lpc0_ibt { ++ status = "okay"; ++ bt-channel = <2>; ++}; ++ ++&bmc_dev1 { ++ status = "okay"; ++ memory-region = <&bmc_dev1_memory>; ++}; ++ ++&xdma1 { ++ status = "okay"; ++ memory-region = <&xdma_memory1>; ++}; ++ ++&pcie_vuart2 { ++ port = <0x3f8>; ++ sirq = <4>; ++ sirq-polarity = <0>; ++ ++ status = "okay"; ++}; ++ ++&pcie_vuart3 { ++ port = <0x2f8>; ++ sirq = <3>; ++ sirq-polarity = <0>; ++ ++ status = "okay"; ++}; ++ ++&pcie_lpc1_kcs0 { ++ status = "okay"; ++ kcs-io-addr = <0x3a0>; ++ kcs-channel = <12>; ++}; ++ ++&pcie_lpc1_kcs1 { ++ status = "okay"; ++ kcs-io-addr = <0x3a8>; ++ kcs-channel = <13>; ++}; ++ ++&pcie_lpc1_kcs2 { ++ status = "okay"; ++ kcs-io-addr = <0x3a2>; ++ kcs-channel = <14>; ++}; ++ ++&pcie_lpc1_kcs3 { ++ status = "okay"; ++ kcs-io-addr = <0x3a4>; ++ kcs-channel = <15>; ++}; ++ ++&pcie_lpc1_ibt { ++ status = "okay"; ++ bt-channel = <3>; ++}; ++ ++&mctp0 { ++ status = "okay"; ++ memory-region = <&mctp0_reserved>; ++}; ++ ++&mctp1 { ++ status = "okay"; ++ memory-region = <&mctp1_reserved>; ++}; ++ ++/* Enable i2c0~i2c4 for LTPI testing. */ ++&i2c0 { ++ /* SMB_PMBUS1_SCM */ ++ /delete-property/ pinctrl-names; ++ /delete-property/ pinctrl-0; ++ status = "okay"; ++}; ++ ++&i2c1 { ++ /* SMB_IPMB_SCL1 */ ++ /delete-property/ pinctrl-names; ++ /delete-property/ pinctrl-0; ++ status = "okay"; ++}; ++ ++&i2c2 { ++ /* SMB_CPLD_SCL2 */ ++ /delete-property/ pinctrl-names; ++ /delete-property/ pinctrl-0; ++ status = "okay"; ++}; ++ ++&i2c3 { ++ /* SMB_PMBUS2_SCM */ ++ /delete-property/ pinctrl-names; ++ /delete-property/ pinctrl-0; ++ status = "okay"; ++}; ++ ++&i2c4 { ++ /* SMB_PMBUS1_SCM */ ++ /delete-property/ pinctrl-names; ++ /delete-property/ pinctrl-0; ++ status = "okay"; ++}; ++ ++/* Default i2c5 is tunnelded to LTPI. */ ++/* Enable i2c5 pinctrl for testing on board LM75. */ ++&i2c5 { ++ /* SMB_TMP_BMC */ ++ status = "okay"; ++}; ++ ++/* AST2700 i2c8 -> AST1060 i2c5 for PCH mailbox emulation test. */ ++&i2c8 { ++ /* SMB_PCIE_SCM */ ++ status = "okay"; ++ multi-master; ++ mctp-controller; ++ mctp@10 { ++ compatible = "mctp-i2c-controller"; ++ reg = <(0x10 | I2C_OWN_SLAVE_ADDRESS)>; ++ }; ++}; ++ ++&i2c9 { ++ /* SMB_HOST_BMC */ ++ status = "okay"; ++ eeprom@50 { ++ compatible = "atmel,24c04"; ++ reg = <0x50>; ++ pagesize = <16>; ++ }; ++}; ++ ++/* AST2700 A1 i2c10 -> AST1060 i2c0 for PFR mailbox. 
*/ ++&i2c10 { ++ /* SMB_HSBP_BMC */ ++ status = "okay"; ++ multi-master; ++ mctp-controller; ++ mctp@10 { ++ compatible = "mctp-i2c-controller"; ++ reg = <(0x10 | I2C_OWN_SLAVE_ADDRESS)>; ++ }; ++}; ++ ++&i2c11 { ++ /* BMC_HPM_I3C_I2C_13 */ ++ status = "okay"; ++}; ++ ++&uphy3a { ++ status = "okay"; ++}; ++ ++&uphy3b { ++ status = "okay"; ++}; ++ ++&vhuba0 { ++ status = "okay"; ++ pinctrl-0 = <&pinctrl_usb2ahpd0_default>; ++}; ++ ++&usb3ahp { ++ status = "okay"; ++ pinctrl-0 = <&pinctrl_usb3axhp_default &pinctrl_usb2axhp_default>; ++}; ++ ++&usb3bhp { ++ status = "okay"; ++}; ++ ++&uphy2b { ++ status = "okay"; ++}; ++ ++&vhubb1 { ++ status = "okay"; ++}; ++ ++&vhubc { ++ status = "okay"; ++}; ++ ++&ehci3 { ++ status = "okay"; ++}; ++ ++&uhci1 { ++ status = "okay"; ++}; ++ ++&wdt0 { ++ status = "okay"; ++}; ++ ++&wdt1 { ++ status = "okay"; ++}; ++ ++&otp { ++ status = "okay"; ++}; +diff --git a/arch/arm64/boot/dts/aspeed/ast2700-dcscm_ast1700-evb-dual.dts b/arch/arm64/boot/dts/aspeed/ast2700-dcscm_ast1700-evb-dual.dts +--- a/arch/arm64/boot/dts/aspeed/ast2700-dcscm_ast1700-evb-dual.dts 1970-01-01 00:00:00.000000000 +0000 ++++ b/arch/arm64/boot/dts/aspeed/ast2700-dcscm_ast1700-evb-dual.dts 2025-12-23 10:16:06.862271766 +0000 +@@ -0,0 +1,218 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++ ++/dts-v1/; ++ ++#include "ast2700-dcscm_ast1700-evb.dts" ++#include "aspeed-ltpi1.dtsi" ++ ++/ { ++ model = "AST2700-DCSCM_AST1700-EVB-DUAL"; ++ ++ ltpi1-iio-hwmon { ++ compatible = "iio-hwmon"; ++ io-channels = <<pi1_adc0 0>, <<pi1_adc0 1>, <<pi1_adc0 2>, <<pi1_adc0 3>, ++ <<pi1_adc0 4>, <<pi1_adc0 5>, <<pi1_adc0 6>, <<pi1_adc0 7>, ++ <<pi1_adc1 0>, <<pi1_adc1 1>, <<pi1_adc1 2>, <<pi1_adc1 3>, ++ <<pi1_adc1 4>, <<pi1_adc1 5>, <<pi1_adc1 6>, <<pi1_adc1 7>; ++ }; ++}; ++ ++<pi1_adc0 { ++ aspeed,int-vref-microvolt = <2500000>; ++ status = "okay"; ++}; ++ ++<pi1_adc1 { ++ aspeed,int-vref-microvolt = <2500000>; ++ status = "okay"; ++}; ++ ++<pi1 { ++ i2c-tunneling = <0x0>; ++ status = "okay"; ++}; ++ ++<pi1_gpio { ++ status = "okay"; ++}; ++ ++<pi1_i3c0 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x06010000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++<pi1_i3c1 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++<pi1_i3c2 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x06012000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++<pi1_i3c3 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++<pi1_i3c4 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x06014000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++<pi1_i3c5 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++<pi1_i3c6 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x06016000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++<pi1_i3c7 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++<pi1_i3c8 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x06018000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++<pi1_i3c9 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++<pi1_i3c10 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x0601A000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++<pi1_i3c11 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++<pi1_i3c12 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x0601C000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++<pi1_i3c13 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++<pi1_i3c14 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x0601E000>; ++ 
dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&ltpi1_i3c15 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i2c6 { ++ status = "disabled"; ++}; ++ ++&i2c7 { ++ status = "disabled"; ++}; ++ ++// The following I2C buses must remain enabled, as configured in ast2700-dcscm.dts: ++// i2c8 / i2c9 / i2c10 ++ ++&i2c11 { ++ status = "disabled"; ++}; ++ ++&ltpi1_i2c0 { ++ status = "okay"; ++}; ++ ++&ltpi1_i2c1 { ++ status = "okay"; ++}; ++ ++&ltpi1_i2c2 { ++ status = "okay"; ++}; ++ ++&ltpi1_i2c3 { ++ status = "okay"; ++}; ++ ++&ltpi1_i2c4 { ++ status = "okay"; ++}; ++ ++&ltpi1_i2c5 { ++ status = "okay"; ++}; ++ ++&ltpi1_i2c6 { ++ status = "okay"; ++}; ++ ++&ltpi1_i2c7 { ++ status = "okay"; ++}; ++ ++&ltpi1_i2c8 { ++ status = "okay"; ++}; ++ ++&ltpi1_i2c9 { ++ status = "okay"; ++}; ++ ++&ltpi1_i2c10 { ++ status = "okay"; ++}; ++ ++&ltpi1_i2c11 { ++ status = "okay"; ++}; ++ ++&ltpi1_i2c12 { ++ status = "okay"; ++}; ++ ++&ltpi1_i2c13 { ++ status = "okay"; ++}; ++ ++&ltpi1_i2c14 { ++ status = "okay"; ++}; ++ ++&ltpi1_i2c15 { ++ status = "okay"; ++}; ++ ++&uart8 { ++ /delete-property/ pinctrl-names; ++ /delete-property/ pinctrl-0; ++ status = "okay"; ++}; +diff --git a/arch/arm64/boot/dts/aspeed/ast2700-dcscm_ast1700-evb.dts b/arch/arm64/boot/dts/aspeed/ast2700-dcscm_ast1700-evb.dts +--- a/arch/arm64/boot/dts/aspeed/ast2700-dcscm_ast1700-evb.dts 1970-01-01 00:00:00.000000000 +0000 ++++ b/arch/arm64/boot/dts/aspeed/ast2700-dcscm_ast1700-evb.dts 2025-12-23 10:16:06.862271766 +0000 +@@ -0,0 +1,456 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++ ++/dts-v1/; ++ ++#include "ast2700-dcscm.dts" ++#include "aspeed-ltpi0.dtsi" ++ ++/ { ++ model = "AST2700-DCSCM_AST1700-EVB"; ++ ++ ltpi_fan0: ltpi-pwm-fan0 { ++ compatible = "pwm-fan"; ++ pwms = <&ltpi0_pwm_tach 0 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ ltpi_fan1: ltpi-pwm-fan1 { ++ compatible = "pwm-fan"; ++ pwms = <&ltpi0_pwm_tach 1 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ ltpi_fan2: ltpi-pwm-fan2 { ++ compatible = "pwm-fan"; ++ pwms = <&ltpi0_pwm_tach 2 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ ltpi_fan3: ltpi-pwm-fan3 { ++ compatible = "pwm-fan"; ++ pwms = <&ltpi0_pwm_tach 3 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ ltpi_fan4: ltpi-pwm-fan4 { ++ compatible = "pwm-fan"; ++ pwms = <&ltpi0_pwm_tach 4 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ ltpi_fan5: ltpi-pwm-fan5 { ++ compatible = "pwm-fan"; ++ pwms = <&ltpi0_pwm_tach 5 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ ltpi_fan6: ltpi-pwm-fan6 { ++ compatible = "pwm-fan"; ++ pwms = <&ltpi0_pwm_tach 6 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ ltpi_fan7: ltpi-pwm-fan7 { ++ compatible = "pwm-fan"; ++ pwms = <&ltpi0_pwm_tach 7 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = 
<3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ ltpi_fan8: ltpi-pwm-fan8 { ++ compatible = "pwm-fan"; ++ pwms = <<pi0_pwm_tach 8 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ ltpi_fan9: ltpi-pwm-fan9 { ++ compatible = "pwm-fan"; ++ pwms = <<pi0_pwm_tach 9 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ ltpi_fan10: ltpi-pwm-fan10 { ++ compatible = "pwm-fan"; ++ pwms = <<pi0_pwm_tach 10 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ ltpi_fan11: ltpi-pwm-fan11 { ++ compatible = "pwm-fan"; ++ pwms = <<pi0_pwm_tach 11 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ ltpi_fan12: ltpi-pwm-fan12 { ++ compatible = "pwm-fan"; ++ pwms = <<pi0_pwm_tach 12 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ ltpi_fan13: ltpi-pwm-fan13 { ++ compatible = "pwm-fan"; ++ pwms = <<pi0_pwm_tach 13 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ ltpi_fan14: ltpi-pwm-fan14 { ++ compatible = "pwm-fan"; ++ pwms = <<pi0_pwm_tach 14 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ ltpi_fan15: ltpi-pwm-fan15 { ++ compatible = "pwm-fan"; ++ pwms = <<pi0_pwm_tach 15 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ ltpi0-iio-hwmon { ++ compatible = "iio-hwmon"; ++ io-channels = <<pi0_adc0 0>, <<pi0_adc0 1>, <<pi0_adc0 2>, <<pi0_adc0 3>, ++ <<pi0_adc0 4>, <<pi0_adc0 5>, <<pi0_adc0 6>, <<pi0_adc0 7>, ++ <<pi0_adc1 0>, <<pi0_adc1 1>, <<pi0_adc1 2>, <<pi0_adc1 3>, ++ <<pi0_adc1 4>, <<pi0_adc1 5>, <<pi0_adc1 6>, <<pi0_adc1 7>; ++ }; ++}; ++ ++<pi0_pwm_tach { ++ status = "okay"; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_pwm0_default &pinctrl_ltpi0_pwm1_default ++ &pinctrl_ltpi0_pwm2_default &pinctrl_ltpi0_pwm3_default ++ &pinctrl_ltpi0_pwm4_default &pinctrl_ltpi0_pwm5_default ++ &pinctrl_ltpi0_pwm6_default &pinctrl_ltpi0_pwm7_default ++ &pinctrl_ltpi0_pwm8_default ++ &pinctrl_ltpi0_tach0_default &pinctrl_ltpi0_tach1_default ++ &pinctrl_ltpi0_tach2_default &pinctrl_ltpi0_tach3_default ++ &pinctrl_ltpi0_tach4_default &pinctrl_ltpi0_tach5_default ++ &pinctrl_ltpi0_tach6_default &pinctrl_ltpi0_tach7_default ++ &pinctrl_ltpi0_tach8_default &pinctrl_ltpi0_tach9_default ++ &pinctrl_ltpi0_tach10_default &pinctrl_ltpi0_tach11_default ++ &pinctrl_ltpi0_tach12_default &pinctrl_ltpi0_tach13_default ++ &pinctrl_ltpi0_tach14_default &pinctrl_ltpi0_tach15_default>; ++ ltpi_fan0 { ++ tach-ch = /bits/ 8 <0x0>; ++ }; ++ ltpi_fan1 { ++ tach-ch = /bits/ 8 <0x1>; ++ }; ++ ltpi_fan2 { ++ tach-ch = /bits/ 8 <0x2>; ++ }; ++ ltpi_fan3 { ++ tach-ch = /bits/ 8 <0x3>; ++ }; ++ ltpi_fan4 { ++ tach-ch = /bits/ 8 <0x4>; ++ }; ++ ltpi_fan5 { ++ tach-ch = /bits/ 8 <0x5>; ++ }; ++ ltpi_fan6 { ++ tach-ch = 
/bits/ 8 <0x6>; ++ }; ++ ltpi_fan7 { ++ tach-ch = /bits/ 8 <0x7>; ++ }; ++ ltpi_fan8 { ++ tach-ch = /bits/ 8 <0x8>; ++ }; ++ ltpi_fan9 { ++ tach-ch = /bits/ 8 <0x9>; ++ }; ++ ltpi_fan10 { ++ tach-ch = /bits/ 8 <0xA>; ++ }; ++ ltpi_fan11 { ++ tach-ch = /bits/ 8 <0xB>; ++ }; ++ ltpi_fan12 { ++ tach-ch = /bits/ 8 <0xC>; ++ }; ++ ltpi_fan13 { ++ tach-ch = /bits/ 8 <0xD>; ++ }; ++ ltpi_fan14 { ++ tach-ch = /bits/ 8 <0xE>; ++ }; ++ ltpi_fan15 { ++ tach-ch = /bits/ 8 <0xF>; ++ }; ++}; ++ ++&sgpios { ++ /delete-property/ gpio-line-names; ++}; ++ ++<pi0_gpio { ++ /delete-property/ gpio-line-names; ++ /delete-node/ gpio_17; ++ /delete-node/ gpio_19; ++ /delete-node/ gpio_25; ++ /delete-node/ gpio_53; ++ /delete-node/ gpio_61; ++ /delete-node/ gpio_63; ++}; ++ ++<pi0_adc0 { ++ aspeed,int-vref-microvolt = <2500000>; ++ status = "okay"; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ltpi0_adc0_default &pinctrl_ltpi0_adc1_default ++ &pinctrl_ltpi0_adc2_default &pinctrl_ltpi0_adc3_default ++ &pinctrl_ltpi0_adc4_default &pinctrl_ltpi0_adc5_default ++ &pinctrl_ltpi0_adc6_default &pinctrl_ltpi0_adc7_default>; ++}; ++ ++<pi0_adc1 { ++ aspeed,int-vref-microvolt = <2500000>; ++ status = "okay"; ++ pinctrl-0 = <&pinctrl_ltpi0_adc8_default &pinctrl_ltpi0_adc9_default ++ &pinctrl_ltpi0_adc10_default &pinctrl_ltpi0_adc11_default ++ &pinctrl_ltpi0_adc12_default &pinctrl_ltpi0_adc13_default ++ &pinctrl_ltpi0_adc14_default &pinctrl_ltpi0_adc15_default>; ++}; ++ ++<pi0 { ++ i2c-tunneling = <0x0>; ++ status = "okay"; ++}; ++ ++<pi0_i3c0 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x06010000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++<pi0_i3c1 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++<pi0_i3c2 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x06012000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++<pi0_i3c3 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++<pi0_i3c4 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x06014000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++<pi0_i3c5 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++<pi0_i3c6 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x06016000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++<pi0_i3c7 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++<pi0_i3c8 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x06018000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++<pi0_i3c9 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++<pi0_i3c10 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x0601A000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++<pi0_i3c11 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++<pi0_i3c12 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x0601C000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++<pi0_i3c13 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++<pi0_i3c14 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x0601E000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++<pi0_i3c15 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i2c0 { ++ status = "disabled"; ++}; ++ ++&i2c1 { ++ status = "disabled"; ++}; ++ ++&i2c2 { ++ status = "disabled"; ++}; ++ ++&i2c3 { ++ status = "disabled"; ++}; ++ ++&i2c4 { ++ status = "disabled"; ++}; ++ ++&i2c5 { ++ status = "disabled"; ++}; ++ ++<pi0_i2c0 { ++ status = "okay"; ++}; ++ ++<pi0_i2c1 { ++ status = "okay"; ++}; ++ ++<pi0_i2c2 { ++ status = "okay"; ++}; ++ ++<pi0_i2c3 { ++ status = "okay"; 
++}; ++ ++<pi0_i2c4 { ++ status = "okay"; ++}; ++ ++<pi0_i2c5 { ++ status = "okay"; ++}; ++ ++<pi0_i2c6 { ++ status = "okay"; ++}; ++ ++<pi0_i2c7 { ++ status = "okay"; ++}; ++ ++<pi0_i2c8 { ++ status = "okay"; ++}; ++ ++<pi0_i2c9 { ++ status = "okay"; ++}; ++ ++<pi0_i2c10 { ++ status = "okay"; ++}; ++ ++<pi0_i2c11 { ++ status = "okay"; ++}; ++ ++<pi0_i2c12 { ++ status = "okay"; ++}; ++ ++<pi0_i2c13 { ++ status = "okay"; ++}; ++ ++<pi0_i2c14 { ++ status = "okay"; ++}; ++ ++<pi0_i2c15 { ++ status = "okay"; ++}; ++ ++&uart3 { ++ /delete-property/ pinctrl-names; ++ /delete-property/ pinctrl-0; ++ status = "okay"; ++}; +diff --git a/arch/arm64/boot/dts/aspeed/ast2700-dcscm_ast1800-evb.dts b/arch/arm64/boot/dts/aspeed/ast2700-dcscm_ast1800-evb.dts +--- a/arch/arm64/boot/dts/aspeed/ast2700-dcscm_ast1800-evb.dts 1970-01-01 00:00:00.000000000 +0000 ++++ b/arch/arm64/boot/dts/aspeed/ast2700-dcscm_ast1800-evb.dts 2025-12-23 10:16:06.862271766 +0000 +@@ -0,0 +1,436 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++ ++/dts-v1/; ++ ++#include "ast2700-dcscm.dts" ++#include "aspeed-ltpi1800.dtsi" ++ ++/ { ++ model = "AST2700-DCSCM_AST1800-EVB"; ++ ++ ltpi_fan0: ltpi-pwm-fan0 { ++ compatible = "pwm-fan"; ++ pwms = <<pi0_pwm_tach 0 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ ltpi_fan1: ltpi-pwm-fan1 { ++ compatible = "pwm-fan"; ++ pwms = <<pi0_pwm_tach 1 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ ltpi_fan2: ltpi-pwm-fan2 { ++ compatible = "pwm-fan"; ++ pwms = <<pi0_pwm_tach 2 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ ltpi_fan3: ltpi-pwm-fan3 { ++ compatible = "pwm-fan"; ++ pwms = <<pi0_pwm_tach 3 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ ltpi_fan4: ltpi-pwm-fan4 { ++ compatible = "pwm-fan"; ++ pwms = <<pi0_pwm_tach 4 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ ltpi_fan5: ltpi-pwm-fan5 { ++ compatible = "pwm-fan"; ++ pwms = <<pi0_pwm_tach 5 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ ltpi_fan6: ltpi-pwm-fan6 { ++ compatible = "pwm-fan"; ++ pwms = <<pi0_pwm_tach 6 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ ltpi_fan7: ltpi-pwm-fan7 { ++ compatible = "pwm-fan"; ++ pwms = <<pi0_pwm_tach 7 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ ltpi_fan8: ltpi-pwm-fan8 { ++ compatible = "pwm-fan"; ++ pwms = <<pi0_pwm_tach 8 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ ltpi_fan9: ltpi-pwm-fan9 { ++ compatible = "pwm-fan"; ++ pwms = <<pi0_pwm_tach 9 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = 
<2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ ltpi_fan10: ltpi-pwm-fan10 { ++ compatible = "pwm-fan"; ++ pwms = <<pi0_pwm_tach 10 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ ltpi_fan11: ltpi-pwm-fan11 { ++ compatible = "pwm-fan"; ++ pwms = <<pi0_pwm_tach 11 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ ltpi_fan12: ltpi-pwm-fan12 { ++ compatible = "pwm-fan"; ++ pwms = <<pi0_pwm_tach 12 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ ltpi_fan13: ltpi-pwm-fan13 { ++ compatible = "pwm-fan"; ++ pwms = <<pi0_pwm_tach 13 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ ltpi_fan14: ltpi-pwm-fan14 { ++ compatible = "pwm-fan"; ++ pwms = <<pi0_pwm_tach 14 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ ltpi_fan15: ltpi-pwm-fan15 { ++ compatible = "pwm-fan"; ++ pwms = <<pi0_pwm_tach 15 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ ltpi0-iio-hwmon { ++ compatible = "iio-hwmon"; ++ io-channels = <<pi0_adc0 0>, <<pi0_adc0 1>, <<pi0_adc0 2>, <<pi0_adc0 3>, ++ <<pi0_adc0 4>, <<pi0_adc0 5>, <<pi0_adc0 6>, <<pi0_adc0 7>, ++ <<pi0_adc1 0>, <<pi0_adc1 1>, <<pi0_adc1 2>, <<pi0_adc1 3>, ++ <<pi0_adc1 4>, <<pi0_adc1 5>, <<pi0_adc1 6>, <<pi0_adc1 7>; ++ }; ++}; ++ ++<pi0_jtag { ++ status = "okay"; ++}; ++ ++<pi0_pwm_tach { ++ status = "okay"; ++ ltpi_fan0 { ++ tach-ch = /bits/ 8 <0x0>; ++ }; ++ ltpi_fan1 { ++ tach-ch = /bits/ 8 <0x1>; ++ }; ++ ltpi_fan2 { ++ tach-ch = /bits/ 8 <0x2>; ++ }; ++ ltpi_fan3 { ++ tach-ch = /bits/ 8 <0x3>; ++ }; ++ ltpi_fan4 { ++ tach-ch = /bits/ 8 <0x4>; ++ }; ++ ltpi_fan5 { ++ tach-ch = /bits/ 8 <0x5>; ++ }; ++ ltpi_fan6 { ++ tach-ch = /bits/ 8 <0x6>; ++ }; ++ ltpi_fan7 { ++ tach-ch = /bits/ 8 <0x7>; ++ }; ++ ltpi_fan8 { ++ tach-ch = /bits/ 8 <0x8>; ++ }; ++ ltpi_fan9 { ++ tach-ch = /bits/ 8 <0x9>; ++ }; ++ ltpi_fan10 { ++ tach-ch = /bits/ 8 <0xA>; ++ }; ++ ltpi_fan11 { ++ tach-ch = /bits/ 8 <0xB>; ++ }; ++ ltpi_fan12 { ++ tach-ch = /bits/ 8 <0xC>; ++ }; ++ ltpi_fan13 { ++ tach-ch = /bits/ 8 <0xD>; ++ }; ++ ltpi_fan14 { ++ tach-ch = /bits/ 8 <0xE>; ++ }; ++ ltpi_fan15 { ++ tach-ch = /bits/ 8 <0xF>; ++ }; ++}; ++ ++&sgpios { ++ /delete-property/ gpio-line-names; ++}; ++ ++<pi0_gpio { ++ /delete-property/ gpio-line-names; ++ /delete-node/ gpio_17; ++ /delete-node/ gpio_19; ++ /delete-node/ gpio_25; ++ /delete-node/ gpio_53; ++ /delete-node/ gpio_61; ++ /delete-node/ gpio_63; ++}; ++ ++<pi0_adc0 { ++ aspeed,int-vref-microvolt = <2500000>; ++ status = "okay"; ++}; ++ ++<pi0_adc1 { ++ aspeed,int-vref-microvolt = <2500000>; ++ status = "okay"; ++}; ++ ++<pi0 { ++ i2c-tunneling = <0x0>; ++}; ++ ++<pi0_i3c0 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x06010000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++<pi0_i3c1 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++<pi0_i3c2 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x06012000>; ++ dcr = /bits/ 8 <0xcc>; ++ 
status = "okay"; ++}; ++ ++<pi0_i3c3 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++<pi0_i3c4 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x06014000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++<pi0_i3c5 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++<pi0_i3c6 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x06016000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++<pi0_i3c7 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++<pi0_i3c8 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x06018000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++<pi0_i3c9 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++<pi0_i3c10 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x0601A000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++<pi0_i3c11 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++<pi0_i3c12 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x0601C000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++<pi0_i3c13 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++<pi0_i3c14 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x0601E000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++<pi0_i3c15 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i2c0 { ++ status = "disabled"; ++}; ++ ++&i2c1 { ++ status = "disabled"; ++}; ++ ++&i2c2 { ++ status = "disabled"; ++}; ++ ++&i2c3 { ++ status = "disabled"; ++}; ++ ++&i2c4 { ++ status = "disabled"; ++}; ++ ++&i2c5 { ++ status = "disabled"; ++}; ++ ++<pi0_i2c0 { ++ status = "okay"; ++}; ++ ++<pi0_i2c1 { ++ status = "okay"; ++}; ++ ++<pi0_i2c2 { ++ status = "okay"; ++}; ++ ++<pi0_i2c3 { ++ status = "okay"; ++}; ++ ++<pi0_i2c4 { ++ status = "okay"; ++}; ++ ++<pi0_i2c5 { ++ status = "okay"; ++}; ++ ++<pi0_i2c6 { ++ status = "okay"; ++}; ++ ++<pi0_i2c7 { ++ status = "okay"; ++}; ++ ++<pi0_i2c8 { ++ status = "okay"; ++}; ++ ++<pi0_i2c9 { ++ status = "okay"; ++}; ++ ++<pi0_i2c10 { ++ status = "okay"; ++}; ++ ++<pi0_i2c11 { ++ status = "okay"; ++}; ++ ++<pi0_i2c12 { ++ status = "okay"; ++}; ++ ++<pi0_i2c13 { ++ status = "okay"; ++}; ++ ++<pi0_i2c14 { ++ status = "okay"; ++}; ++ ++<pi0_i2c15 { ++ status = "okay"; ++}; ++ ++&uart3 { ++ /delete-property/ pinctrl-names; ++ /delete-property/ pinctrl-0; ++ status = "okay"; ++}; +diff --git a/arch/arm64/boot/dts/aspeed/ast2700-evb-256-abr.dts b/arch/arm64/boot/dts/aspeed/ast2700-evb-256-abr.dts +--- a/arch/arm64/boot/dts/aspeed/ast2700-evb-256-abr.dts 1970-01-01 00:00:00.000000000 +0000 ++++ b/arch/arm64/boot/dts/aspeed/ast2700-evb-256-abr.dts 2025-12-23 10:16:06.862271766 +0000 +@@ -0,0 +1,40 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++ ++/dts-v1/; ++ ++#include "ast2700-evb.dts" ++ ++&fmc { ++ status = "okay"; ++ pinctrl-0 = <&pinctrl_fwspi_quad_default>; ++ pinctrl-names = "default"; ++ ++ flash@0 { ++ status = "okay"; ++ m25p,fast-read; ++ label = "bmc"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <4>; ++ spi-rx-bus-width = <4>; ++#include "aspeed-evb-flash-layout-256-abr.dtsi" ++ }; ++ ++ flash@1 { ++ status = "okay"; ++ m25p,fast-read; ++ label = "fmc0:1"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <4>; ++ spi-rx-bus-width = <4>; ++ }; ++ ++ flash@2 { ++ status = "disabled"; ++ m25p,fast-read; ++ label = "fmc0:2"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <4>; ++ spi-rx-bus-width = <4>; ++ }; ++}; ++ +diff --git a/arch/arm64/boot/dts/aspeed/ast2700-evb-s0.dts b/arch/arm64/boot/dts/aspeed/ast2700-evb-s0.dts +--- 
a/arch/arm64/boot/dts/aspeed/ast2700-evb-s0.dts 1970-01-01 00:00:00.000000000 +0000 ++++ b/arch/arm64/boot/dts/aspeed/ast2700-evb-s0.dts 2025-12-23 10:16:06.862271766 +0000 +@@ -0,0 +1,367 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++ ++#include "ast2700-evb.dts" ++#include ++ ++/ { ++ model = "AST2700 EVB Test S0 Test"; ++}; ++ ++&pcie2 { ++ status = "disabled"; ++}; ++ ++&mdio2 { ++ status = "okay"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ ethphy2: ethernet-phy@0 { ++ compatible = "ethernet-phy-ieee802.3-c22"; ++ reg = <0>; ++ }; ++}; ++ ++&sgmii { ++ status = "okay"; ++}; ++ ++&mac2 { ++ status = "okay"; ++ ++ phy-mode = "sgmii"; ++ phy-handle = <&ethphy2>; ++}; ++ ++&sdio_controller { ++ status = "disabled"; ++}; ++ ++&sdhci { ++ status = "disabled"; ++}; ++ ++&espi1 { ++ status = "okay"; ++ perif-dma-mode; ++#if 0 // TODO: Not enough memory for espi1 MMBI ++ perif-mmbi-enable; ++ perif-mmbi-src-addr = <0x0 0xa8000000>; ++ perif-mmbi-tgt-memory = <&espi1_mmbi_memory>; ++ perif-mmbi-instance-num = <0x1>; ++#endif ++ perif-mcyc-enable; ++ perif-mcyc-src-addr = <0x0 0x98000000>; ++ perif-mcyc-size = <0x0 0x10000>; ++ oob-dma-mode; ++ flash-dma-mode; ++}; ++ ++&can0 { ++ status = "disabled"; ++}; ++ ++// S0 uses 0x10 as its slave address ++&i2c0 { ++ clock-frequency = <1000000>; ++ debounce-level = <4>; ++ status = "okay"; ++ multi-master; ++ mctp-controller; ++ mctp@10 { ++ compatible = "mctp-i2c-controller"; ++ reg = <(0x10 | I2C_OWN_SLAVE_ADDRESS)>; ++ }; ++}; ++ ++&i2c1 { ++ clock-frequency = <1000000>; ++ debounce-level = <4>; ++ status = "okay"; ++ multi-master; ++ mctp-controller; ++ mctp@10 { ++ compatible = "mctp-i2c-controller"; ++ reg = <(0x10 | I2C_OWN_SLAVE_ADDRESS)>; ++ }; ++}; ++ ++&i2c2 { ++ clock-frequency = <1000000>; ++ debounce-level = <4>; ++ status = "okay"; ++ multi-master; ++ mctp-controller; ++ mctp@10 { ++ compatible = "mctp-i2c-controller"; ++ reg = <(0x10 | I2C_OWN_SLAVE_ADDRESS)>; ++ }; ++}; ++ ++&i2c3 { ++ clock-frequency = <1000000>; ++ debounce-level = <6>; ++ status = "okay"; ++ multi-master; ++ mctp-controller; ++ mctp@10 { ++ compatible = "mctp-i2c-controller"; ++ reg = <(0x10 | I2C_OWN_SLAVE_ADDRESS)>; ++ }; ++}; ++ ++&i2c4 { ++ clock-frequency = <1000000>; ++ debounce-level = <4>; ++ status = "okay"; ++ multi-master; ++ mctp-controller; ++ mctp@10 { ++ compatible = "mctp-i2c-controller"; ++ reg = <(0x10 | I2C_OWN_SLAVE_ADDRESS)>; ++ }; ++}; ++ ++&i2c5 { ++ clock-frequency = <1000000>; ++ debounce-level = <4>; ++ status = "okay"; ++ multi-master; ++ mctp-controller; ++ mctp@10 { ++ compatible = "mctp-i2c-controller"; ++ reg = <(0x10 | I2C_OWN_SLAVE_ADDRESS)>; ++ }; ++}; ++ ++&i2c6 { ++ clock-frequency = <1000000>; ++ debounce-level = <4>; ++ status = "okay"; ++ multi-master; ++ mctp-controller; ++ mctp@10 { ++ compatible = "mctp-i2c-controller"; ++ reg = <(0x10 | I2C_OWN_SLAVE_ADDRESS)>; ++ }; ++}; ++ ++&i2c7 { ++ clock-frequency = <1000000>; ++ debounce-level = <4>; ++ status = "okay"; ++ multi-master; ++ mctp-controller; ++ mctp@10 { ++ compatible = "mctp-i2c-controller"; ++ reg = <(0x10 | I2C_OWN_SLAVE_ADDRESS)>; ++ }; ++}; ++ ++&i2c8 { ++ clock-frequency = <1000000>; ++ debounce-level = <4>; ++ status = "okay"; ++ multi-master; ++ mctp-controller; ++ mctp@10 { ++ compatible = "mctp-i2c-controller"; ++ reg = <(0x10 | I2C_OWN_SLAVE_ADDRESS)>; ++ }; ++}; ++ ++&i2c9 { ++ clock-frequency = <1000000>; ++ debounce-level = <4>; ++ status = "okay"; ++ multi-master; ++ mctp-controller; ++ mctp@10 { ++ compatible = "mctp-i2c-controller"; ++ reg = 
<(0x10 | I2C_OWN_SLAVE_ADDRESS)>; ++ }; ++}; ++ ++&i2c10 { ++ clock-frequency = <1000000>; ++ debounce-level = <4>; ++ status = "okay"; ++ multi-master; ++ mctp-controller; ++ mctp@10 { ++ compatible = "mctp-i2c-controller"; ++ reg = <(0x10 | I2C_OWN_SLAVE_ADDRESS)>; ++ }; ++}; ++ ++&i2c11 { ++ clock-frequency = <1000000>; ++ debounce-level = <6>; ++ status = "okay"; ++ multi-master; ++ mctp-controller; ++ mctp@10 { ++ compatible = "mctp-i2c-controller"; ++ reg = <(0x10 | I2C_OWN_SLAVE_ADDRESS)>; ++ }; ++}; ++ ++&i2c12 { ++ clock-frequency = <1000000>; ++ debounce-level = <4>; ++ status = "okay"; ++ multi-master; ++ mctp-controller; ++ mctp@10 { ++ compatible = "mctp-i2c-controller"; ++ reg = <(0x10 | I2C_OWN_SLAVE_ADDRESS)>; ++ }; ++}; ++ ++&i2c13 { ++ clock-frequency = <1000000>; ++ debounce-level = <4>; ++ status = "okay"; ++ multi-master; ++ mctp-controller; ++ mctp@10 { ++ compatible = "mctp-i2c-controller"; ++ reg = <(0x10 | I2C_OWN_SLAVE_ADDRESS)>; ++ }; ++}; ++ ++&i2c14 { ++ clock-frequency = <1000000>; ++ debounce-level = <4>; ++ status = "okay"; ++ multi-master; ++ mctp-controller; ++ mctp@10 { ++ compatible = "mctp-i2c-controller"; ++ reg = <(0x10 | I2C_OWN_SLAVE_ADDRESS)>; ++ }; ++}; ++ ++&i2c15 { ++ clock-frequency = <1000000>; ++ debounce-level = <4>; ++ status = "okay"; ++ multi-master; ++ mctp-controller; ++ mctp@10 { ++ compatible = "mctp-i2c-controller"; ++ reg = <(0x10 | I2C_OWN_SLAVE_ADDRESS)>; ++ }; ++}; ++ ++&i3c0 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x06010000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c1 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x06011000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c2 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x06012000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c3 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x06013000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c4 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x06014000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c5 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x06015000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c6 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x06016000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c7 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x06017000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c8 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x06018000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c9 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x06019000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c10 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x0601A000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c11 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x0601B000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c12 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x0601C000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c13 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x0601D000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c14 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x0601E000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c15 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x0601F000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; +diff --git a/arch/arm64/boot/dts/aspeed/ast2700-evb-s1.dts 
b/arch/arm64/boot/dts/aspeed/ast2700-evb-s1.dts +--- a/arch/arm64/boot/dts/aspeed/ast2700-evb-s1.dts 1970-01-01 00:00:00.000000000 +0000 ++++ b/arch/arm64/boot/dts/aspeed/ast2700-evb-s1.dts 2025-12-23 10:16:06.862271766 +0000 +@@ -0,0 +1,465 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++ ++#include "ast2700-evb.dts" ++#include ++ ++/ { ++ model = "AST2700 EVB Test S1 Test"; ++}; ++ ++&bmc_dev0 { ++ status = "disabled"; ++}; ++ ++&xdma0 { ++ status = "disabled"; ++}; ++ ++&pcie_vuart0 { ++ status = "disabled"; ++}; ++ ++&pcie_vuart1 { ++ status = "disabled"; ++}; ++ ++&pcie_lpc0_kcs0 { ++ status = "disabled"; ++}; ++ ++&pcie_lpc0_kcs1 { ++ status = "disabled"; ++}; ++ ++&pcie_lpc0_kcs2 { ++ status = "disabled"; ++}; ++ ++&pcie_lpc0_kcs3 { ++ status = "disabled"; ++}; ++ ++&pcie_lpc0_ibt { ++ status = "disabled"; ++}; ++ ++&pcie0_mmbi0 { ++ status = "disabled"; ++}; ++ ++&pcie0 { ++ status = "okay"; ++}; ++ ++&bmc_dev1 { ++ status = "disabled"; ++}; ++ ++&xdma1 { ++ status = "disabled"; ++}; ++ ++&pcie_vuart2 { ++ status = "disabled"; ++}; ++ ++&pcie_vuart3 { ++ status = "disabled"; ++}; ++ ++&pcie_lpc1_kcs0 { ++ status = "disabled"; ++}; ++ ++&pcie_lpc1_kcs1 { ++ status = "disabled"; ++}; ++ ++&pcie_lpc1_kcs2 { ++ status = "disabled"; ++}; ++ ++&pcie_lpc1_kcs3 { ++ status = "disabled"; ++}; ++ ++&pcie_lpc1_ibt { ++ status = "disabled"; ++}; ++ ++&pcie1_mmbi4 { ++ status = "disabled"; ++}; ++ ++&pcie1 { ++ status = "okay"; ++}; ++ ++&sdio_controller { ++ status = "disabled"; ++}; ++ ++&sdhci { ++ status = "disabled"; ++}; ++ ++&espi1 { ++ status = "okay"; ++ perif-dma-mode; ++#if 0 // TODO: No enough memory for espi1 MMBI ++ perif-mmbi-enable; ++ perif-mmbi-src-addr = <0x0 0xa8000000>; ++ perif-mmbi-tgt-memory = <&espi1_mmbi_memory>; ++ perif-mmbi-instance-num = <0x1>; ++#endif ++ perif-mcyc-enable; ++ perif-mcyc-src-addr = <0x0 0x98000000>; ++ perif-mcyc-size = <0x0 0x10000>; ++ oob-dma-mode; ++ flash-dma-mode; ++}; ++ ++&can0 { ++ status = "disabled"; ++}; ++ ++// S1 use 0x12 as its slave address ++&i2c0 { ++ clock-frequency = <1000000>; ++ debounce-level = <4>; ++ status = "okay"; ++ multi-master; ++ mctp-controller; ++ mctp@12 { ++ compatible = "mctp-i2c-controller"; ++ reg = <(0x12 | I2C_OWN_SLAVE_ADDRESS)>; ++ }; ++}; ++ ++&i2c1 { ++ clock-frequency = <1000000>; ++ debounce-level = <4>; ++ status = "okay"; ++ multi-master; ++ mctp-controller; ++ mctp@12 { ++ compatible = "mctp-i2c-controller"; ++ reg = <(0x12 | I2C_OWN_SLAVE_ADDRESS)>; ++ }; ++}; ++ ++&i2c2 { ++ clock-frequency = <1000000>; ++ debounce-level = <4>; ++ status = "okay"; ++ multi-master; ++ mctp-controller; ++ mctp@12 { ++ compatible = "mctp-i2c-controller"; ++ reg = <(0x12 | I2C_OWN_SLAVE_ADDRESS)>; ++ }; ++}; ++ ++&i2c3 { ++ clock-frequency = <1000000>; ++ debounce-level = <6>; ++ status = "okay"; ++ multi-master; ++ mctp-controller; ++ mctp@12 { ++ compatible = "mctp-i2c-controller"; ++ reg = <(0x12 | I2C_OWN_SLAVE_ADDRESS)>; ++ }; ++}; ++ ++&i2c4 { ++ clock-frequency = <1000000>; ++ debounce-level = <4>; ++ status = "okay"; ++ multi-master; ++ mctp-controller; ++ mctp@12 { ++ compatible = "mctp-i2c-controller"; ++ reg = <(0x12 | I2C_OWN_SLAVE_ADDRESS)>; ++ }; ++}; ++ ++&i2c5 { ++ clock-frequency = <1000000>; ++ debounce-level = <4>; ++ status = "okay"; ++ multi-master; ++ mctp-controller; ++ mctp@12 { ++ compatible = "mctp-i2c-controller"; ++ reg = <(0x12 | I2C_OWN_SLAVE_ADDRESS)>; ++ }; ++}; ++ ++&i2c6 { ++ clock-frequency = <1000000>; ++ debounce-level = <4>; ++ status = "okay"; ++ multi-master; ++ 
mctp-controller; ++ mctp@12 { ++ compatible = "mctp-i2c-controller"; ++ reg = <(0x12 | I2C_OWN_SLAVE_ADDRESS)>; ++ }; ++}; ++ ++&i2c7 { ++ clock-frequency = <1000000>; ++ debounce-level = <4>; ++ status = "okay"; ++ multi-master; ++ mctp-controller; ++ mctp@12 { ++ compatible = "mctp-i2c-controller"; ++ reg = <(0x12 | I2C_OWN_SLAVE_ADDRESS)>; ++ }; ++}; ++ ++&i2c8 { ++ clock-frequency = <1000000>; ++ debounce-level = <4>; ++ status = "okay"; ++ multi-master; ++ mctp-controller; ++ mctp@12 { ++ compatible = "mctp-i2c-controller"; ++ reg = <(0x12 | I2C_OWN_SLAVE_ADDRESS)>; ++ }; ++}; ++ ++&i2c9 { ++ clock-frequency = <1000000>; ++ debounce-level = <4>; ++ status = "okay"; ++ multi-master; ++ mctp-controller; ++ mctp@12 { ++ compatible = "mctp-i2c-controller"; ++ reg = <(0x12 | I2C_OWN_SLAVE_ADDRESS)>; ++ }; ++}; ++ ++&i2c10 { ++ clock-frequency = <1000000>; ++ debounce-level = <4>; ++ status = "okay"; ++ multi-master; ++ mctp-controller; ++ mctp@12 { ++ compatible = "mctp-i2c-controller"; ++ reg = <(0x12 | I2C_OWN_SLAVE_ADDRESS)>; ++ }; ++}; ++ ++&i2c11 { ++ clock-frequency = <1000000>; ++ debounce-level = <6>; ++ status = "okay"; ++ multi-master; ++ mctp-controller; ++ mctp@12 { ++ compatible = "mctp-i2c-controller"; ++ reg = <(0x12 | I2C_OWN_SLAVE_ADDRESS)>; ++ }; ++}; ++ ++&i2c12 { ++ clock-frequency = <1000000>; ++ debounce-level = <4>; ++ status = "okay"; ++ multi-master; ++ mctp-controller; ++ mctp@12 { ++ compatible = "mctp-i2c-controller"; ++ reg = <(0x12 | I2C_OWN_SLAVE_ADDRESS)>; ++ }; ++}; ++ ++&i2c13 { ++ clock-frequency = <1000000>; ++ debounce-level = <4>; ++ status = "okay"; ++ multi-master; ++ mctp-controller; ++ mctp@12 { ++ compatible = "mctp-i2c-controller"; ++ reg = <(0x12 | I2C_OWN_SLAVE_ADDRESS)>; ++ }; ++}; ++ ++&i2c14 { ++ clock-frequency = <1000000>; ++ debounce-level = <4>; ++ status = "okay"; ++ multi-master; ++ mctp-controller; ++ mctp@12 { ++ compatible = "mctp-i2c-controller"; ++ reg = <(0x12 | I2C_OWN_SLAVE_ADDRESS)>; ++ }; ++}; ++ ++&i2c15 { ++ clock-frequency = <1000000>; ++ debounce-level = <4>; ++ status = "okay"; ++ multi-master; ++ mctp-controller; ++ mctp@12 { ++ compatible = "mctp-i2c-controller"; ++ reg = <(0x12 | I2C_OWN_SLAVE_ADDRESS)>; ++ }; ++}; ++ ++&i3c0 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c1 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c2 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c3 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c4 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c5 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c6 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c7 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c8 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c9 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c10 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c11 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c12 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c13 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c14 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c15 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&sgpios { ++ status = "disabled"; ++}; ++ ++&mac0 { ++ status = "okay"; ++ ++ phy-mode = "rmii"; ++ use-ncsi; ++ ++ pinctrl-names = "default"; ++ /* If you want to use RMII0 RCLKO as internal clock for RMII, ++ * add &pinctrl_rmii0_rclko_default 
in pinctrl-0. ++ */ ++ pinctrl-0 = <&pinctrl_rmii0_default>; ++}; ++ ++&mac1 { ++ status = "okay"; ++ ++ phy-mode = "rmii"; ++ use-ncsi; ++ ++ pinctrl-names = "default"; ++ /* If you want to use RMII1 RCLKO as internal clock for RMII, ++ * add &pinctrl_rmii1_rclko_default in pinctrl-0. ++ */ ++ pinctrl-0 = <&pinctrl_rmii1_default>; ++}; ++ ++&syscon1 { ++ mac0-clk-delay = <0 0 ++ 0 0 ++ 0 0>; ++ mac1-clk-delay = <0 0 ++ 0 0 ++ 0 0>; ++ assigned-clocks = <&syscon1 SCU1_CLK_MACHCLK>, ++ <&syscon1 SCU1_CLK_RGMII>, ++ <&syscon1 SCU1_CLK_RMII>; ++ assigned-clock-rates = <200000000>, <125000000>, <50000000>; ++}; ++ ++&vhuba0 { ++ status = "okay"; ++ pinctrl-0 = <&pinctrl_usb2ad0_default>; ++}; ++ ++&uphy2a { ++ status = "okay"; ++}; ++ ++&usb3ahp { ++ status = "disabled"; ++}; ++ ++&usb3bhp { ++ status = "disabled"; ++}; ++ ++&vhubb0 { ++ status = "okay"; ++}; ++ ++&vhubb1 { ++ status = "disabled"; ++}; +diff --git a/arch/arm64/boot/dts/aspeed/ast2700-evb.dts b/arch/arm64/boot/dts/aspeed/ast2700-evb.dts +--- a/arch/arm64/boot/dts/aspeed/ast2700-evb.dts 1970-01-01 00:00:00.000000000 +0000 ++++ b/arch/arm64/boot/dts/aspeed/ast2700-evb.dts 2025-12-23 10:16:06.862271766 +0000 +@@ -0,0 +1,1270 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++ ++/dts-v1/; ++#include "aspeed-g7.dtsi" ++#include ++#include ++#include ++ ++#define DUAL_NODE 0 // 1: DUAL_NODE, 0: SINGLE_NODE ++#define PCIE0_EP 1 // 1: EP, 0: RC ++#define PCIE1_EP 1 // 1: EP, 0: RC ++#define PCIE2_RC 1 // 1: RC, 0: SGMII ++ ++/ { ++ model = "AST2700 EVB"; ++ compatible = "aspeed,ast2700-evb", "aspeed,ast2700"; ++ ++ chosen { ++ stdout-path = "serial12:115200n8"; ++ }; ++ ++ memory@400000000 { ++ device_type = "memory"; ++ reg = <0x4 0x00000000 0x0 0x40000000>; ++ }; ++ ++ reserved-memory { ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ranges; ++ ++ #include "ast2700-reserved-mem.dtsi" ++ ++ video_engine_memory0: video0 { ++ size = <0x0 0x02000000>; ++ alignment = <0x0 0x00010000>; ++ compatible = "shared-dma-pool"; ++ reusable; ++ }; ++ ++ video_engine_memory1: video1 { ++ size = <0x0 0x02000000>; ++ alignment = <0x0 0x00010000>; ++ compatible = "shared-dma-pool"; ++ reusable; ++ }; ++ ++#if 0 ++ gfx_memory: framebuffer { ++ size = <0x0 0x01000000>; ++ alignment = <0x0 0x01000000>; ++ compatible = "shared-dma-pool"; ++ reusable; ++ }; ++#endif ++ ++ espi0_mcyc_memory: mcyc0 { ++ size = <0x0 0x01000000>; ++ alignment = <0x0 0x00010000>; ++ compatible = "shared-dma-pool"; ++ reusable; ++ }; ++ }; ++ ++ fan0: pwm-fan0 { ++ compatible = "pwm-fan"; ++ pwms = <&pwm_tach 0 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ fan1: pwm-fan1 { ++ compatible = "pwm-fan"; ++ pwms = <&pwm_tach 1 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ fan2: pwm-fan2 { ++ compatible = "pwm-fan"; ++ pwms = <&pwm_tach 2 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ fan3: pwm-fan3 { ++ compatible = "pwm-fan"; ++ pwms = <&pwm_tach 3 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ fan4: pwm-fan4 { ++ compatible = "pwm-fan"; ++ pwms = <&pwm_tach 4 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = 
<0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ fan5: pwm-fan5 { ++ compatible = "pwm-fan"; ++ pwms = <&pwm_tach 5 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ fan6: pwm-fan6 { ++ compatible = "pwm-fan"; ++ pwms = <&pwm_tach 6 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ fan7: pwm-fan7 { ++ compatible = "pwm-fan"; ++ pwms = <&pwm_tach 7 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ fan8: pwm-fan8 { ++ compatible = "pwm-fan"; ++ pwms = <&pwm_tach 8 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ iio-hwmon { ++ compatible = "iio-hwmon"; ++ status = "okay"; ++ io-channels = <&adc0 0>, <&adc0 1>, <&adc0 2>, <&adc0 3>, ++ <&adc0 4>, <&adc0 5>, <&adc0 6>, <&adc0 7>, ++ <&adc1 0>, <&adc1 1>, <&adc1 2>, <&adc1 3>, ++ <&adc1 4>, <&adc1 5>, <&adc1 6>, <&adc1 7>; ++ }; ++}; ++ ++&pwm_tach { ++ status = "okay"; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_pwm0_default &pinctrl_pwm1_default ++ &pinctrl_pwm2_default &pinctrl_pwm3_default ++ &pinctrl_pwm4_default &pinctrl_pwm5_default ++ &pinctrl_pwm6_default &pinctrl_pwm7_default ++ &pinctrl_pwm8_default ++ &pinctrl_tach0_default &pinctrl_tach1_default ++ &pinctrl_tach2_default &pinctrl_tach3_default ++ &pinctrl_tach4_default &pinctrl_tach5_default ++ &pinctrl_tach6_default &pinctrl_tach7_default ++ &pinctrl_tach8_default &pinctrl_tach9_default ++ &pinctrl_tach10_default &pinctrl_tach11_default ++ &pinctrl_tach12_default &pinctrl_tach13_default ++ &pinctrl_tach14_default &pinctrl_tach15_default>; ++ fan-0 { ++ tach-ch = /bits/ 8 <0x0>; ++ }; ++ fan-1 { ++ tach-ch = /bits/ 8 <0x1>; ++ }; ++ fan-2 { ++ tach-ch = /bits/ 8 <0x2>; ++ }; ++ fan-3 { ++ tach-ch = /bits/ 8 <0x3>; ++ }; ++ fan-4 { ++ tach-ch = /bits/ 8 <0x4>; ++ }; ++ fan-5 { ++ tach-ch = /bits/ 8 <0x5>; ++ }; ++ fan-6 { ++ tach-ch = /bits/ 8 <0x6>; ++ }; ++ fan-7 { ++ tach-ch = /bits/ 8 <0x7>; ++ }; ++ fan-8 { ++ tach-ch = /bits/ 8 <0x8>; ++ }; ++ fan-9 { ++ tach-ch = /bits/ 8 <0x9>; ++ }; ++ fan-10 { ++ tach-ch = /bits/ 8 <0xA>; ++ }; ++ fan-11 { ++ tach-ch = /bits/ 8 <0xB>; ++ }; ++ fan-12 { ++ tach-ch = /bits/ 8 <0xC>; ++ }; ++ fan-13 { ++ tach-ch = /bits/ 8 <0xD>; ++ }; ++ fan-14 { ++ tach-ch = /bits/ 8 <0xE>; ++ }; ++ fan-15 { ++ tach-ch = /bits/ 8 <0xF>; ++ }; ++}; ++ ++&edac { ++ status = "okay"; ++}; ++ ++&mctp0 { ++ status = "okay"; ++ memory-region = <&mctp0_reserved>; ++}; ++ ++&mctp1 { ++ status = "okay"; ++ memory-region = <&mctp1_reserved>; ++}; ++ ++&mctp2 { ++ status = "okay"; ++ memory-region = <&mctp2_reserved>; ++}; ++ ++&sgpiom0 { ++ status = "okay"; ++}; ++ ++&sgpiom1 { ++ status = "okay"; ++}; ++ ++&jtag1 { ++ status = "okay"; ++}; ++ ++&adc0 { ++ aspeed,int-vref-microvolt = <2500000>; ++ status = "okay"; ++ ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_adc0_default &pinctrl_adc1_default ++ &pinctrl_adc2_default &pinctrl_adc3_default ++ &pinctrl_adc4_default &pinctrl_adc5_default ++ &pinctrl_adc6_default &pinctrl_adc7_default>; ++}; ++ ++&adc1 { ++ aspeed,int-vref-microvolt = <2500000>; ++ status = "okay"; ++ ++ pinctrl-names = 
"default"; ++ pinctrl-0 = <&pinctrl_adc8_default &pinctrl_adc9_default ++ &pinctrl_adc10_default &pinctrl_adc11_default ++ &pinctrl_adc12_default &pinctrl_adc13_default ++ &pinctrl_adc14_default &pinctrl_adc15_default>; ++}; ++ ++&pinctrl0 { ++ pinctrl_emmcclk_driving: emmcclk-driving { ++ pins = "AC14"; ++ drive-strength = <2>; ++ }; ++ ++ pinctrl_emmccmd_driving: emmccmd-driving { ++ pins = "AE15"; ++ drive-strength = <1>; ++ }; ++ pinctrl_emmc4bit_driving: emmcdat-driving { ++ pins = "AD14", "AE14", "AF14", "AB13"; ++ drive-strength = <1>; ++ }; ++ ++ pinctrl_emmc8bit_driving: emmcdat-driving { ++ pins = "AD14", "AE14", "AF14", "AB13", "AF13", "AC13", "AD13", "AE13"; ++ drive-strength = <1>; ++ }; ++}; ++ ++&pinctrl1 { ++ pinctrl_i3c0_3_hv_voltage: i3chv-voltage { ++ pins = "U25"; ++ power-source = <1800>; ++ }; ++ ++ pinctrl_i3c0_driving: i3c0-driving { ++ pins = "U25", "U26"; ++ drive-strength = <2>; ++ }; ++ ++ pinctrl_i3c1_driving: i3c1-driving { ++ pins = "Y26", "AA24"; ++ drive-strength = <2>; ++ }; ++ ++ pinctrl_i3c2_driving: i3c2-driving { ++ pins = "R25", "AA26"; ++ drive-strength = <2>; ++ }; ++ ++ pinctrl_i3c3_driving: i3c3-driving { ++ pins = "R26", "Y25"; ++ drive-strength = <2>; ++ }; ++ ++ pinctrl_i3c12_15_hv_voltage: i3chv-voltage { ++ pins = "W25"; ++ power-source = <1800>; ++ }; ++ ++ pinctrl_i3c12_driving: i3c12-driving { ++ pins = "W25", "Y23"; ++ drive-strength = <2>; ++ }; ++ ++ pinctrl_i3c13_driving: i3c13-driving { ++ pins = "Y24", "W21"; ++ drive-strength = <2>; ++ }; ++ ++ pinctrl_i3c14_driving: i3c14-driving { ++ pins = "AA23", "AC22"; ++ drive-strength = <2>; ++ }; ++ ++ pinctrl_i3c15_driving: i3c15-driving { ++ pins = "AB22", "Y21"; ++ drive-strength = <2>; ++ }; ++ ++ pinctrl_rgmii0_driving: rgmii0-driving { ++ pins = "C20", "C19", "A8", "R14", "A7", "P14", ++ "D20", "A6", "B6", "N14", "B7", "B8"; ++ drive-strength = <1>; ++ }; ++ ++ pinctrl_rgmii1_driving: rgmii1-driving { ++ pins = "D19", "C19", "D15", "B12", "B10", "P13", ++ "C18", "C6", "C7", "D7", "N13", "C8"; ++ drive-strength = <1>; ++ }; ++}; ++ ++&gpio1 { ++ pinctrl-0 = <&pinctrl_i3c0_3_hv_voltage &pinctrl_i3c12_15_hv_voltage ++ &pinctrl_i3c0_driving &pinctrl_i3c1_driving ++ &pinctrl_i3c2_driving &pinctrl_i3c3_driving ++ &pinctrl_i3c12_driving &pinctrl_i3c13_driving ++ &pinctrl_i3c14_driving &pinctrl_i3c15_driving>; ++ pinctrl-names = "default"; ++}; ++ ++&i3c0 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x06010000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c1 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c2 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x06012000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c3 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c4 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x06014000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c5 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c6 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x06016000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c7 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c8 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x06018000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c9 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c10 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x0601A000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c11 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ 
++&i3c12 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x0601C000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c13 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c14 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x0601E000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c15 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&uart12 { ++ status = "okay"; ++}; ++ ++#if 0 ++&vuart0 { ++ virtual; ++ port = <0x3f8>; ++ sirq = <4>; ++ sirq-polarity = <0>; ++ status = "okay"; ++}; ++#endif ++ ++&fmc { ++ status = "okay"; ++ pinctrl-0 = <&pinctrl_fwspi_quad_default>; ++ pinctrl-names = "default"; ++ ++ flash@0 { ++ status = "okay"; ++ m25p,fast-read; ++ label = "bmc"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <4>; ++ spi-rx-bus-width = <4>; ++#include "aspeed-evb-flash-layout-128.dtsi" ++ }; ++ ++ flash@1 { ++ status = "okay"; ++ m25p,fast-read; ++ label = "fmc0:1"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <4>; ++ spi-rx-bus-width = <4>; ++ }; ++ ++ flash@2 { ++ status = "disabled"; ++ m25p,fast-read; ++ label = "fmc0:2"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <4>; ++ spi-rx-bus-width = <4>; ++ }; ++}; ++ ++&spi0 { ++ status = "okay"; ++ pinctrl-0 = <&pinctrl_spi0_default &pinctrl_spi0_cs1_default>; ++ pinctrl-names = "default"; ++ ++ flash@0 { ++ status = "okay"; ++ m25p,fast-read; ++ label = "spi0:0"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <2>; ++ spi-rx-bus-width = <2>; ++ }; ++ ++ flash@1 { ++ status = "disabled"; ++ m25p,fast-read; ++ label = "spi0:1"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <2>; ++ spi-rx-bus-width = <2>; ++ }; ++}; ++ ++&spi1 { ++ status = "okay"; ++ pinctrl-0 = <&pinctrl_spi1_default &pinctrl_spi1_cs1_default>; ++ pinctrl-names = "default"; ++ ++ flash@0 { ++ status = "okay"; ++ m25p,fast-read; ++ label = "spi1:0"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <2>; ++ spi-rx-bus-width = <2>; ++ }; ++ ++ flash@1 { ++ status = "disabled"; ++ m25p,fast-read; ++ label = "spi1:1"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <2>; ++ spi-rx-bus-width = <2>; ++ }; ++}; ++ ++#if 1 ++&spi2 { ++ compatible = "aspeed,ast2700-spi-txrx"; ++ pinctrl-0 = <&pinctrl_spi2_default>; ++ pinctrl-names = "default"; ++ status = "okay"; ++ ++ tpm0: tpmdev@0 { ++ compatible = "tcg,tpm_tis-spi"; ++ spi-max-frequency = <25000000>; ++ reg = <0>; ++ status = "okay"; ++ }; ++}; ++#else ++&spi2 { ++ compatible = "aspeed,ast2700-spi"; ++ pinctrl-0 = <&pinctrl_spi2_default &pinctrl_spi2_cs1_default>; ++ pinctrl-names = "default"; ++ status = "okay"; ++ ++ flash@0 { ++ status = "okay"; ++ reg = < 0 >; ++ compatible = "jedec,spi-nor"; ++ m25p,fast-read; ++ label = "spi2:0"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <2>; ++ spi-rx-bus-width = <2>; ++ }; ++ ++ flash@1 { ++ status = "okay"; ++ reg = < 1 >; ++ compatible = "jedec,spi-nor"; ++ m25p,fast-read; ++ label = "spi2:1"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <2>; ++ spi-rx-bus-width = <2>; ++ }; ++}; ++#endif ++ ++&can0 { ++ status = "okay"; ++}; ++ ++&emmc_controller { ++ status = "okay"; ++ mmc-hs200-1_8v; ++}; ++ ++&emmc { ++ status = "okay"; ++#if 1 ++ bus-width = <4>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_emmc_default ++ &pinctrl_emmcclk_driving ++ &pinctrl_emmccmd_driving ++ &pinctrl_emmc4bit_driving>; ++#else ++ bus-width = <8>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_emmc_default ++ &pinctrl_emmcg8_default ++ 
&pinctrl_emmcclk_driving ++ &pinctrl_emmccmd_driving ++ &pinctrl_emmc8bit_driving>; ++#endif ++ ++ non-removable; ++ max-frequency = <200000000>; ++}; ++ ++&ufs_controller { ++ status = "okay"; ++}; ++ ++&ufs { ++ status = "okay"; ++ lanes-per-direction = <2>; ++ ref-clk-freq = <26000000>; ++}; ++ ++&chassis { ++ status = "okay"; ++}; ++ ++&mdio0 { ++ status = "okay"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ ethphy0: ethernet-phy@0 { ++ compatible = "ethernet-phy-ieee802.3-c22"; ++ reg = <0>; ++ }; ++}; ++ ++&mdio1 { ++ status = "okay"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ ethphy1: ethernet-phy@0 { ++ compatible = "ethernet-phy-ieee802.3-c22"; ++ reg = <0>; ++ /* For DDR5 board */ ++ ti,rx-internal-delay = ; ++ ti,tx-internal-delay = ; ++ }; ++}; ++ ++&mac0 { ++ status = "okay"; ++ ++ phy-mode = "rgmii-id"; ++ phy-handle = <&ethphy0>; ++ ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_rgmii0_default &pinctrl_rgmii0_driving>; ++}; ++ ++&mac1 { ++ status = "okay"; ++ ++ phy-mode = "rgmii-id"; ++ phy-handle = <&ethphy1>; ++ ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_rgmii1_default &pinctrl_rgmii1_driving>; ++}; ++ ++#if 0 // Default to disable RC & SGMII ++#define PCIE2_RC 1 // 1: RC, 0: SGMII ++#if PCIE2_RC ++&pcie2 { ++ status = "okay"; ++}; ++#else ++&mdio2 { ++ status = "okay"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ ethphy2: ethernet-phy@0 { ++ compatible = "ethernet-phy-ieee802.3-c22"; ++ reg = <0>; ++ }; ++}; ++ ++&sgmii { ++ status = "okay"; ++}; ++ ++&mac2 { ++ status = "okay"; ++ ++ phy-mode = "sgmii"; ++ phy-handle = <&ethphy2>; ++}; ++#endif ++#endif ++ ++&syscon1 { ++ assigned-clocks = <&syscon1 SCU1_CLK_MACHCLK>, ++ <&syscon1 SCU1_CLK_RGMII>, ++ <&syscon1 SCU1_CLK_RMII>; ++ assigned-clock-rates = <200000000>, <125000000>, <50000000>; ++}; ++ ++&espi0 { ++ status = "okay"; ++ perif-dma-mode; ++ perif-mmbi-enable; ++ perif-mmbi-src-addr = <0x0 0xa8000000>; ++ perif-mmbi-tgt-memory = <&espi0_mmbi_memory>; ++ perif-mmbi-instance-num = <0x1>; ++ perif-mcyc-enable; ++ perif-mcyc-src-addr = <0x0 0x98000000>; ++ perif-mcyc-size = <0x0 0x10000>; ++ memory-region = <&espi0_mcyc_memory>; ++ perif-rtc-enable; ++ oob-dma-mode; ++ flash-dma-mode; ++#if 0 // eDAF mode: Change 1 to enable MIX mode in Linux, but HW mode in SPL would be overwritten ++ flash-edaf-mode = <0x0>; ++ flash-edaf-tgt-addr = <&edaf0>; ++#endif ++}; ++ ++&rtc_over_espi0 { ++ status = "okay"; ++}; ++ ++/* Enable UART2 and UART9 for obmc-console. */ ++&uart2 { ++ /delete-property/ pinctrl-names; ++ /delete-property/ pinctrl-0; ++ status = "okay"; ++}; ++ ++&uart9 { ++ /delete-property/ pinctrl-names; ++ /delete-property/ pinctrl-0; ++ status = "okay"; ++}; ++ ++#if DUAL_NODE ++&espi1 { ++ status = "okay"; ++ perif-dma-mode; ++ perif-mmbi-enable; ++ perif-mmbi-src-addr = <0x0 0xa8000000>; ++ perif-mmbi-tgt-memory = <&espi1_mmbi_memory>; ++ perif-mmbi-instance-num = <0x1>; ++ perif-mcyc-enable; ++ perif-mcyc-src-addr = <0x0 0x98000000>; ++ perif-mcyc-size = <0x0 0x10000>; ++ perif-rtc-enable; ++ oob-dma-mode; ++ flash-dma-mode; ++#if 0 // eDAF mode: Change 1 to enable MIX mode in Linux, but HW mode in SPL would be overwritten ++ flash-edaf-mode = <0x0>; ++ flash-edaf-tgt-addr = <&edaf1>; ++#endif ++}; ++ ++&rtc_over_espi1 { ++ status = "okay"; ++}; ++ ++/* Enable UART7 and UART10 for obmc-console.
*/ ++&uart7 { ++ /delete-property/ pinctrl-names; ++ /delete-property/ pinctrl-0; ++ status = "okay"; ++}; ++ ++&uart10 { ++ /delete-property/ pinctrl-names; ++ /delete-property/ pinctrl-0; ++ status = "okay"; ++}; ++#endif /* DUAL_NODE */ ++ ++&lpc0_kcs0 { ++ status = "okay"; ++ kcs-io-addr = <0xca0>; ++ kcs-channel = <0>; ++}; ++ ++&lpc0_kcs1 { ++ status = "okay"; ++ kcs-io-addr = <0xca8>; ++ kcs-channel = <1>; ++}; ++ ++&lpc0_kcs2 { ++ status = "okay"; ++ kcs-io-addr = <0xca2>; ++ kcs-channel = <2>; ++}; ++ ++&lpc0_kcs3 { ++ status = "okay"; ++ kcs-io-addr = <0xca4>; ++ kcs-channel = <3>; ++}; ++ ++&lpc0_ibt { ++ status = "okay"; ++}; ++ ++&lpc0_mbox { ++ status = "okay"; ++}; ++ ++#if 1 ++&lpc0_snoop { ++ status = "okay"; ++ snoop-ports = <0x80>, <0x81>; ++}; ++#else ++&lpc0_pcc { ++ status = "okay"; ++ pcc-ports = <0x80>; ++}; ++#endif ++ ++&lpc0_uart_routing { ++ status = "okay"; ++}; ++ ++&lpc1_kcs0 { ++ status = "okay"; ++ kcs-io-addr = <0xca0>; ++ kcs-channel = <4>; ++}; ++ ++&lpc1_kcs1 { ++ status = "okay"; ++ kcs-io-addr = <0xca8>; ++ kcs-channel = <5>; ++}; ++ ++&lpc1_kcs2 { ++ status = "okay"; ++ kcs-io-addr = <0xca2>; ++ kcs-channel = <6>; ++}; ++ ++&lpc1_kcs3 { ++ status = "okay"; ++ kcs-io-addr = <0xca4>; ++ kcs-channel = <7>; ++}; ++ ++&lpc1_ibt { ++ status = "okay"; ++}; ++ ++&lpc1_mbox { ++ status = "okay"; ++}; ++ ++#if 1 ++&lpc1_snoop { ++ status = "okay"; ++ snoop-ports = <0x80>, <0x81>; ++}; ++#else ++&lpc1_pcc { ++ status = "okay"; ++ pcc-ports = <0x80>; ++}; ++#endif ++ ++&lpc1_uart_routing { ++ status = "okay"; ++}; ++ ++&video0 { ++ status = "okay"; ++ memory-region = <&video_engine_memory0>; ++}; ++ ++&video1 { ++ status = "okay"; ++ memory-region = <&video_engine_memory1>; ++}; ++ ++&disp_intf { ++ status = "okay"; ++}; ++ ++&rtc { ++ status = "okay"; ++}; ++ ++&rsss { ++ status = "okay"; ++}; ++ ++&ecdsa { ++ status = "okay"; ++}; ++ ++&hace { ++ status = "okay"; ++}; ++ ++#if PCIE0_EP ++&bmc_dev0 { ++ status = "okay"; ++ memory-region = <&bmc_dev0_memory>; ++}; ++ ++&xdma0 { ++ status = "okay"; ++ memory-region = <&xdma_memory0>; ++}; ++ ++&pcie_vuart0 { ++ port = <0x3f8>; ++ sirq = <4>; ++ sirq-polarity = <0>; ++ ++ status = "okay"; ++}; ++ ++&pcie_vuart1 { ++ port = <0x2f8>; ++ sirq = <3>; ++ sirq-polarity = <0>; ++ ++ status = "okay"; ++}; ++ ++&pcie_lpc0_kcs0 { ++ status = "okay"; ++ kcs-io-addr = <0x3a0>; ++ kcs-channel = <8>; ++}; ++ ++&pcie_lpc0_kcs1 { ++ status = "okay"; ++ kcs-io-addr = <0x3a8>; ++ kcs-channel = <9>; ++}; ++ ++&pcie_lpc0_kcs2 { ++ status = "okay"; ++ kcs-io-addr = <0x3a2>; ++ kcs-channel = <10>; ++}; ++ ++&pcie_lpc0_kcs3 { ++ status = "okay"; ++ kcs-io-addr = <0x3a4>; ++ kcs-channel = <11>; ++}; ++ ++&pcie_lpc0_ibt { ++ status = "okay"; ++ bt-channel = <2>; ++}; ++ ++&pcie0_mmbi0 { ++ status = "okay"; ++ memory-region = <&pcie0_mmbi0_memory>; ++ ++ bmc-int-value = /bits/ 8 <0x00>; ++ bmc-int-location = <0>; ++}; ++#else /* !PCIE0_EP */ ++&pcie0 { ++ status = "okay"; ++}; ++#endif /* PCIE0_EP */ ++ ++#if PCIE1_EP ++&bmc_dev1 { ++ status = "okay"; ++ memory-region = <&bmc_dev1_memory>; ++}; ++ ++&xdma1 { ++ status = "okay"; ++ memory-region = <&xdma_memory1>; ++}; ++ ++&pcie_vuart2 { ++ port = <0x3f8>; ++ sirq = <4>; ++ sirq-polarity = <0>; ++ ++ status = "okay"; ++}; ++ ++&pcie_vuart3 { ++ port = <0x2f8>; ++ sirq = <3>; ++ sirq-polarity = <0>; ++ ++ status = "okay"; ++}; ++ ++&pcie_lpc1_kcs0 { ++ status = "okay"; ++ kcs-io-addr = <0x3a0>; ++ kcs-channel = <12>; ++}; ++ ++&pcie_lpc1_kcs1 { ++ status = "okay"; ++ kcs-io-addr = <0x3a8>; 
++ kcs-channel = <13>; ++}; ++ ++&pcie_lpc1_kcs2 { ++ status = "okay"; ++ kcs-io-addr = <0x3a2>; ++ kcs-channel = <14>; ++}; ++ ++&pcie_lpc1_kcs3 { ++ status = "okay"; ++ kcs-io-addr = <0x3a4>; ++ kcs-channel = <15>; ++}; ++ ++&pcie_lpc1_ibt { ++ status = "okay"; ++ bt-channel = <3>; ++}; ++ ++&pcie1_mmbi4 { ++ status = "okay"; ++ memory-region = <&pcie1_mmbi4_memory>; ++ ++ bmc-int-value = /bits/ 8 <0x00>; ++ bmc-int-location = <0>; ++}; ++#else /* !PCIE1_EP */ ++&pcie1 { ++ status = "okay"; ++}; ++#endif /* PCIE1_EP */ ++ ++&sdio_controller { ++ status = "okay"; ++ mmc-hs200-1_8v; ++ ++ vcc_sdhci0: regulator-vcc-sdhci0 { ++ compatible = "regulator-fixed"; ++ regulator-name = "SDHCI0 Vcc"; ++ regulator-min-microvolt = <3300000>; ++ regulator-max-microvolt = <3300000>; ++ gpios = <&gpio1 ASPEED_GPIO(G, 6) GPIO_ACTIVE_HIGH>; ++ enable-active-high; ++ }; ++ ++ vccq_sdhci0: regulator-vccq-sdhci0 { ++ compatible = "regulator-gpio"; ++ regulator-name = "SDHCI0 VccQ"; ++ regulator-min-microvolt = <1800000>; ++ regulator-max-microvolt = <3300000>; ++ gpios = <&gpio1 ASPEED_GPIO(G, 7) GPIO_ACTIVE_HIGH>; ++ gpios-states = <1>; ++ states = <3300000 1>, ++ <1800000 0>; ++ }; ++}; ++ ++&sdhci { ++ status = "okay"; ++ bus-width = <4>; ++ max-frequency = <125000000>; ++ /* DDR50 bits in CAPA2 are not supported */ ++ sdhci-caps-mask = <0x6 0x0>; ++ sdhci-drive-type = /bits/ 8 <3>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_sd_default>; ++ vmmc-supply = <&vcc_sdhci0>; ++ vqmmc-supply = <&vccq_sdhci0>; ++ sd-uhs-sdr104; /* enable sdr104 to execute tuning */ ++}; ++ ++#if 1 ++&i2c0 { ++ status = "okay"; ++}; ++ ++&i2c1 { ++ status = "okay"; ++}; ++ ++&i2c2 { ++ status = "okay"; ++}; ++ ++&i2c3 { ++ status = "okay"; ++}; ++ ++&i2c4 { ++ status = "okay"; ++}; ++ ++&i2c5 { ++ status = "okay"; ++}; ++ ++&i2c6 { ++ status = "okay"; ++}; ++ ++&i2c7 { ++ status = "okay"; ++}; ++ ++&i2c8 { ++ status = "okay"; ++}; ++ ++&i2c11 { ++ status = "okay"; ++}; ++ ++&i2c12 { ++ status = "okay"; ++}; ++ ++&i2c13 { ++ status = "okay"; ++}; ++#endif ++ ++#if 0 ++&ehci0 { ++ status = "okay"; ++}; ++ ++&ehci1 { ++ status = "okay"; ++}; ++ ++&uhci0 { ++ status = "okay"; ++ memory-region = <&uhci0_reserved>; ++}; ++ ++#endif ++ ++#if 1 ++&uphy3a { ++ status = "okay"; ++}; ++ ++&uphy3b { ++ status = "okay"; ++}; ++#endif ++ ++#if 0 ++&xhci0 { ++ status = "okay"; ++}; ++ ++&xhci1 { ++ status = "okay"; ++}; ++#endif ++ ++&vhuba0 { ++ status = "okay"; ++ pinctrl-0 = <&pinctrl_usb2ahpd0_default>; ++}; ++ ++&usb3ahp { ++ status = "okay"; ++ pinctrl-0 = <&pinctrl_usb3axhp_default &pinctrl_usb2axhp_default>; ++}; ++ ++&usb3bhp { ++ status = "okay"; ++}; ++ ++&uphy2b { ++ status = "okay"; ++}; ++ ++&vhubb1 { ++ status = "okay"; ++}; ++ ++&vhubc { ++ status = "okay"; ++#if 0 ++ pinctrl-0 = <&pinctrl_usb2cud_default>; ++ aspeed,uart-ports = <12>; ++#endif ++}; ++ ++&ehci3 { ++ status = "okay"; ++}; ++ ++&uhci1 { ++ status = "okay"; ++ memory-region = <&uhci1_reserved>; ++}; ++ ++&wdt0 { ++ status = "okay"; ++}; ++ ++&wdt1 { ++ status = "okay"; ++}; ++ ++&otp { ++ status = "okay"; ++}; ++ ++#if 0 ++&soc0 { ++ mbox-ssp-0 { ++ compatible = "aspeed,aspeed-mbox"; ++ reg = <0x4 0x31480000 0x0 0x200000>, <0x0 0x10001000 0x0 0x1000>; ++ mboxes = <&mbox0 0>; ++ aspeed,tx-timeout = <100>; ++ }; ++}; ++#endif ++ ++#if 1 ++&soc0 { ++ mbox-bootmcu-1 { ++ compatible = "aspeed,aspeed-mbox"; ++ reg = <0x4 0x31880000 0x0 0x100000>, <0x4 0x31980000 0x0 0x100000>; ++ mboxes = <&mbox2 1>; ++ aspeed,tx-timeout = <10000>; ++ }; ++}; ++#endif +diff 
--git a/arch/arm64/boot/dts/aspeed/ast2700-evbeeprom.dts b/arch/arm64/boot/dts/aspeed/ast2700-evbeeprom.dts +--- a/arch/arm64/boot/dts/aspeed/ast2700-evbeeprom.dts 1970-01-01 00:00:00.000000000 +0000 ++++ b/arch/arm64/boot/dts/aspeed/ast2700-evbeeprom.dts 2025-12-23 10:16:06.862271766 +0000 +@@ -0,0 +1,1179 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++ ++/dts-v1/; ++ ++#include "aspeed-g7.dtsi" ++#include ++#include ++#define PCIE0_EP 1 // 1: EP, 0: RC ++#define PCIE1_EP 1 // 1: EP, 0: RC ++ ++/ { ++ model = "AST2700-EVB"; ++ compatible = "aspeed,ast2700-evb", "aspeed,ast2700"; ++ ++ chosen { ++ stdout-path = "serial12:115200n8"; ++ }; ++ ++ memory@400000000 { ++ device_type = "memory"; ++ reg = <0x4 0x00000000 0x0 0x40000000>; ++ }; ++ ++ reserved-memory { ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ranges; ++ ++ #include "ast2700-reserved-mem.dtsi" ++ ++ video_engine_memory0: video0 { ++ size = <0x0 0x02c00000>; ++ alignment = <0x0 0x00100000>; ++ compatible = "shared-dma-pool"; ++ reusable; ++ }; ++ ++ video_engine_memory1: video1{ ++ size = <0x0 0x02c00000>; ++ alignment = <0x0 0x00100000>; ++ compatible = "shared-dma-pool"; ++ reusable; ++ }; ++ ++ gfx_memory: framebuffer { ++ size = <0x0 0x01000000>; ++ alignment = <0x0 0x01000000>; ++ compatible = "shared-dma-pool"; ++ reusable; ++ }; ++ ++ xdma_memory0: xdma0 { ++ size = <0x0 0x01000000>; ++ alignment = <0x0 0x01000000>; ++ compatible = "shared-dma-pool"; ++ no-map; ++ }; ++ ++ xdma_memory1: xdma1 { ++ size = <0x0 0x01000000>; ++ alignment = <0x0 0x01000000>; ++ compatible = "shared-dma-pool"; ++ no-map; ++ }; ++ }; ++ ++ fan0: pwm-fan0 { ++ compatible = "pwm-fan"; ++ pwms = <&pwm_tach 0 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ fan1: pwm-fan1 { ++ compatible = "pwm-fan"; ++ pwms = <&pwm_tach 1 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ fan2: pwm-fan2 { ++ compatible = "pwm-fan"; ++ pwms = <&pwm_tach 2 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ fan3: pwm-fan3 { ++ compatible = "pwm-fan"; ++ pwms = <&pwm_tach 3 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ fan4: pwm-fan4 { ++ compatible = "pwm-fan"; ++ pwms = <&pwm_tach 4 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ fan5: pwm-fan5 { ++ compatible = "pwm-fan"; ++ pwms = <&pwm_tach 5 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ fan6: pwm-fan6 { ++ compatible = "pwm-fan"; ++ pwms = <&pwm_tach 6 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ fan7: pwm-fan7 { ++ compatible = "pwm-fan"; ++ pwms = <&pwm_tach 7 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ fan8: pwm-fan8 { ++ compatible = "pwm-fan"; ++ pwms = <&pwm_tach 8 40000 0>; /* Target 
freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ iio-hwmon { ++ compatible = "iio-hwmon"; ++ status = "okay"; ++ io-channels = <&adc0 0>, <&adc0 1>, <&adc0 2>, <&adc0 3>, ++ <&adc0 4>, <&adc0 5>, <&adc0 6>, <&adc0 7>, ++ <&adc1 0>, <&adc1 1>, <&adc1 2>, <&adc1 3>, ++ <&adc1 4>, <&adc1 5>, <&adc1 6>, <&adc1 7>; ++ }; ++}; ++ ++&pwm_tach { ++ status = "okay"; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_pwm0_default &pinctrl_pwm1_default ++ &pinctrl_pwm2_default &pinctrl_pwm3_default ++ &pinctrl_pwm4_default &pinctrl_pwm5_default ++ &pinctrl_pwm6_default &pinctrl_pwm7_default ++ &pinctrl_pwm8_default ++ &pinctrl_tach0_default &pinctrl_tach1_default ++ &pinctrl_tach2_default &pinctrl_tach3_default ++ &pinctrl_tach4_default &pinctrl_tach5_default ++ &pinctrl_tach6_default &pinctrl_tach7_default ++ &pinctrl_tach8_default &pinctrl_tach9_default ++ &pinctrl_tach10_default &pinctrl_tach11_default ++ &pinctrl_tach12_default &pinctrl_tach13_default ++ &pinctrl_tach14_default &pinctrl_tach15_default>; ++ fan-0 { ++ tach-ch = /bits/ 8 <0x0>; ++ }; ++ fan-1 { ++ tach-ch = /bits/ 8 <0x1>; ++ }; ++ fan-2 { ++ tach-ch = /bits/ 8 <0x2>; ++ }; ++ fan-3 { ++ tach-ch = /bits/ 8 <0x3>; ++ }; ++ fan-4 { ++ tach-ch = /bits/ 8 <0x4>; ++ }; ++ fan-5 { ++ tach-ch = /bits/ 8 <0x5>; ++ }; ++ fan-6 { ++ tach-ch = /bits/ 8 <0x6>; ++ }; ++ fan-7 { ++ tach-ch = /bits/ 8 <0x7>; ++ }; ++ fan-8 { ++ tach-ch = /bits/ 8 <0x8>; ++ }; ++ fan-9 { ++ tach-ch = /bits/ 8 <0x9>; ++ }; ++ fan-10 { ++ tach-ch = /bits/ 8 <0xA>; ++ }; ++ fan-11 { ++ tach-ch = /bits/ 8 <0xB>; ++ }; ++ fan-12 { ++ tach-ch = /bits/ 8 <0xC>; ++ }; ++ fan-13 { ++ tach-ch = /bits/ 8 <0xD>; ++ }; ++ fan-14 { ++ tach-ch = /bits/ 8 <0xE>; ++ }; ++ fan-15 { ++ tach-ch = /bits/ 8 <0xF>; ++ }; ++}; ++ ++&mctp0 { ++ status = "okay"; ++ memory-region = <&mctp0_reserved>; ++}; ++ ++&mctp1 { ++ status = "okay"; ++ memory-region = <&mctp1_reserved>; ++}; ++ ++&mctp2 { ++ status = "okay"; ++ memory-region = <&mctp2_reserved>; ++}; ++ ++&sgpiom0 { ++ status = "okay"; ++}; ++ ++&sgpiom1 { ++ status = "okay"; ++}; ++ ++&jtag1 { ++ status = "okay"; ++}; ++ ++&adc0 { ++ aspeed,int-vref-microvolt = <2500000>; ++ status = "okay"; ++ ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_adc0_default &pinctrl_adc1_default ++ &pinctrl_adc2_default &pinctrl_adc3_default ++ &pinctrl_adc4_default &pinctrl_adc5_default ++ &pinctrl_adc6_default &pinctrl_adc7_default>; ++}; ++ ++&adc1 { ++ aspeed,int-vref-microvolt = <2500000>; ++ status = "okay"; ++ ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_adc8_default &pinctrl_adc9_default ++ &pinctrl_adc10_default &pinctrl_adc11_default ++ &pinctrl_adc12_default &pinctrl_adc13_default ++ &pinctrl_adc14_default &pinctrl_adc15_default>; ++}; ++ ++&pinctrl1 { ++ pinctrl_i3c0_3_hv_voltage: i3chv-voltage { ++ pins = "U25"; ++ power-source = <1800>; ++ }; ++ ++ pinctrl_i3c0_driving: i3c0-driving { ++ pins = "U25", "U26"; ++ drive-strength = <2>; ++ }; ++ ++ pinctrl_i3c1_driving: i3c1-driving { ++ pins = "Y26", "AA24"; ++ drive-strength = <2>; ++ }; ++ ++ pinctrl_i3c2_driving: i3c2-driving { ++ pins = "R25", "AA26"; ++ drive-strength = <2>; ++ }; ++ ++ pinctrl_i3c3_driving: i3c3-driving { ++ pins = "R26", "Y25"; ++ drive-strength = <2>; ++ }; ++ ++ pinctrl_i3c12_15_hv_voltage: i3chv-voltage { ++ pins = "W25"; ++ power-source = <1800>; ++ }; ++ ++ pinctrl_i3c12_driving: i3c12-driving { ++ pins = "W25", "Y23"; ++ drive-strength = <2>; ++ }; 
++ ++ pinctrl_i3c13_driving: i3c13-driving { ++ pins = "Y24", "W21"; ++ drive-strength = <2>; ++ }; ++ ++ pinctrl_i3c14_driving: i3c14-driving { ++ pins = "AA23", "AC22"; ++ drive-strength = <2>; ++ }; ++ ++ pinctrl_i3c15_driving: i3c15-driving { ++ pins = "AB22", "Y21"; ++ drive-strength = <2>; ++ }; ++ ++ pinctrl_rgmii0_driving: rgmii0-driving { ++ pins = "C20", "C19", "A8", "R14", "A7", "P14", ++ "D20", "A6", "B6", "N14", "B7", "B8"; ++ drive-strength = <1>; ++ }; ++ ++ pinctrl_rgmii1_driving: rgmii1-driving { ++ pins = "D19", "C19", "D15", "B12", "B10", "P13", ++ "C18", "C6", "C7", "D7", "N13", "C8"; ++ drive-strength = <1>; ++ }; ++}; ++ ++&gpio0 { ++ pinctrl-0 = <&pinctrl_i3c0_3_hv_voltage &pinctrl_i3c12_15_hv_voltage ++ &pinctrl_i3c0_driving &pinctrl_i3c1_driving ++ &pinctrl_i3c2_driving &pinctrl_i3c3_driving ++ &pinctrl_i3c12_driving &pinctrl_i3c13_driving ++ &pinctrl_i3c14_driving &pinctrl_i3c15_driving>; ++ pinctrl-names = "default"; ++}; ++ ++&i3c0 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x06010000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c1 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c2 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x06012000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c3 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c4 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x06014000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c5 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c6 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x06016000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c7 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c8 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x06018000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c9 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c10 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x0601A000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c11 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c12 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x0601C000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c13 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c14 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x0601E000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c15 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&uart12 { ++ status = "okay"; ++}; ++ ++&fmc { ++ status = "okay"; ++ pinctrl-0 = <&pinctrl_fwspi_quad_default>; ++ pinctrl-names = "default"; ++ ++ flash@0 { ++ status = "okay"; ++ m25p,fast-read; ++ label = "bmc"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <4>; ++ spi-rx-bus-width = <4>; ++#include "aspeed-evb-flash-layout-128.dtsi" ++ }; ++ ++ flash@1 { ++ status = "okay"; ++ m25p,fast-read; ++ label = "fmc0:1"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <4>; ++ spi-rx-bus-width = <4>; ++ }; ++ ++ flash@2 { ++ status = "disabled"; ++ m25p,fast-read; ++ label = "fmc0:2"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <4>; ++ spi-rx-bus-width = <4>; ++ }; ++}; ++ ++&spi0 { ++ status = "okay"; ++ pinctrl-0 = <&pinctrl_spi0_default &pinctrl_spi0_cs1_default>; ++ pinctrl-names = "default"; ++ ++ flash@0 { ++ status = "okay"; ++ m25p,fast-read; ++ label = "spi0:0"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <2>; ++ spi-rx-bus-width = <2>; ++ }; ++ ++ flash@1 { ++ status = 
"disabled"; ++ m25p,fast-read; ++ label = "spi0:1"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <2>; ++ spi-rx-bus-width = <2>; ++ }; ++}; ++ ++&spi1 { ++ status = "okay"; ++ pinctrl-0 = <&pinctrl_spi1_default &pinctrl_spi1_cs1_default>; ++ pinctrl-names = "default"; ++ ++ flash@0 { ++ status = "okay"; ++ m25p,fast-read; ++ label = "spi1:0"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <2>; ++ spi-rx-bus-width = <2>; ++ }; ++ ++ flash@1 { ++ status = "disabled"; ++ m25p,fast-read; ++ label = "spi1:1"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <2>; ++ spi-rx-bus-width = <2>; ++ }; ++}; ++ ++#if 1 ++&spi2 { ++ compatible = "aspeed,ast2700-spi-txrx"; ++ pinctrl-0 = <&pinctrl_spi2_default>; ++ pinctrl-names = "default"; ++ status = "okay"; ++ ++ spi-aspeed-full-duplex; ++ ++ tpm0: tpmdev@0 { ++ compatible = "tcg,tpm_tis-spi"; ++ spi-max-frequency = <34000000>; ++ reg = <0>; ++ status = "okay"; ++ }; ++}; ++#else ++&spi2 { ++ compatible = "aspeed,ast2700-spi"; ++ pinctrl-0 = <&pinctrl_spi2_default &pinctrl_spi2_cs1_default>; ++ pinctrl-names = "default"; ++ status = "okay"; ++ ++ flash@0 { ++ status = "okay"; ++ reg = < 0 >; ++ compatible = "jedec,spi-nor"; ++ m25p,fast-read; ++ label = "spi2:0"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <2>; ++ spi-rx-bus-width = <2>; ++ }; ++ ++ flash@1 { ++ status = "okay"; ++ reg = < 1 >; ++ compatible = "jedec,spi-nor"; ++ m25p,fast-read; ++ label = "spi2:1"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <2>; ++ spi-rx-bus-width = <2>; ++ }; ++}; ++#endif ++ ++&emmc_controller { ++ status = "okay"; ++ mmc-hs200-1_8v; ++}; ++ ++&emmc { ++ status = "okay"; ++#if 1 ++ bus-width = <4>; ++#else ++ bus-width = <8>; ++ pinctrl-0 = <&pinctrl_emmc_default ++ &pinctrl_emmcg8_default>; ++#endif ++ non-removable; ++ max-frequency = <200000000>; ++}; ++ ++&ufs_controller { ++ status = "okay"; ++}; ++ ++&ufs { ++ status = "okay"; ++ lanes-per-direction = <2>; ++ ref-clk-freq = <26000000>; ++}; ++ ++&chassis { ++ status = "okay"; ++}; ++ ++&mdio0 { ++ status = "okay"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ ethphy0: ethernet-phy@0 { ++ compatible = "ethernet-phy-ieee802.3-c22"; ++ reg = <0>; ++ }; ++}; ++ ++&mdio1 { ++ status = "okay"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ ethphy1: ethernet-phy@0 { ++ compatible = "ethernet-phy-ieee802.3-c22"; ++ reg = <0>; ++ }; ++}; ++ ++&mac0 { ++ status = "okay"; ++ ++ phy-mode = "rgmii"; ++ phy-handle = <ðphy0>; ++ ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_rgmii0_default &pinctrl_rgmii0_driving>; ++}; ++ ++&mac1 { ++ status = "okay"; ++ ++ phy-mode = "rgmii"; ++ phy-handle = <ðphy1>; ++ ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_rgmii1_default &pinctrl_rgmii1_driving>; ++}; ++ ++#if 0 ++&mdio2 { ++ status = "okay"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ ethphy2: ethernet-phy@0 { ++ compatible = "ethernet-phy-ieee802.3-c22"; ++ reg = <0>; ++ }; ++}; ++ ++&sgmii { ++ status = "okay"; ++}; ++ ++&mac2 { ++ status = "okay"; ++ ++ phy-mode = "sgmii"; ++ phy-handle = <ðphy2>; ++}; ++ ++&pcie2 { ++ status = "okay"; ++}; ++#endif ++ ++&syscon1 { ++ mac0-clk-delay = <0x33 0x30 ++ 0x10 0x10 ++ 0x10 0x10>; ++ mac1-clk-delay = <0x31 0x31 ++ 0x10 0x10 ++ 0x10 0x10>; ++ assigned-clocks = <&syscon1 SCU1_CLK_MACHCLK>, ++ <&syscon1 SCU1_CLK_RGMII>, ++ <&syscon1 SCU1_CLK_RMII>; ++ assigned-clock-rates = <200000000>, <125000000>, <50000000>; ++}; ++ ++&espi0 { ++ status = "okay"; ++ perif-dma-mode; ++ perif-mmbi-enable; 
++ perif-mmbi-src-addr = <0x0 0xa8000000>; ++ perif-mmbi-tgt-memory = <&espi0_mmbi_memory>; ++ perif-mmbi-instance-num = <0x1>; ++ perif-mcyc-enable; ++ perif-mcyc-src-addr = <0x0 0x98000000>; ++ perif-mcyc-size = <0x0 0x10000>; ++ oob-dma-mode; ++ flash-dma-mode; ++}; ++ ++&lpc0_kcs0 { ++ status = "okay"; ++ kcs-io-addr = <0xca0>; ++ kcs-channel = <0>; ++}; ++ ++&lpc0_kcs1 { ++ status = "okay"; ++ kcs-io-addr = <0xca8>; ++ kcs-channel = <1>; ++}; ++ ++&lpc0_kcs2 { ++ status = "okay"; ++ kcs-io-addr = <0xca2>; ++ kcs-channel = <2>; ++}; ++ ++&lpc0_kcs3 { ++ status = "okay"; ++ kcs-io-addr = <0xca4>; ++ kcs-channel = <3>; ++}; ++ ++&lpc0_ibt { ++ status = "okay"; ++}; ++ ++&lpc0_mbox { ++ status = "okay"; ++}; ++ ++&lpc0_snoop { ++ status = "okay"; ++ snoop-ports = <0x80>, <0x81>; ++}; ++ ++&lpc0_uart_routing { ++ status = "okay"; ++}; ++ ++&lpc1_kcs0 { ++ status = "okay"; ++ kcs-io-addr = <0xca0>; ++ kcs-channel = <4>; ++}; ++ ++&lpc1_kcs1 { ++ status = "okay"; ++ kcs-io-addr = <0xca8>; ++ kcs-channel = <5>; ++}; ++ ++&lpc1_kcs2 { ++ status = "okay"; ++ kcs-io-addr = <0xca2>; ++ kcs-channel = <6>; ++}; ++ ++&lpc1_kcs3 { ++ status = "okay"; ++ kcs-io-addr = <0xca4>; ++ kcs-channel = <7>; ++}; ++ ++&lpc1_ibt { ++ status = "okay"; ++}; ++ ++&lpc1_mbox { ++ status = "okay"; ++}; ++ ++&lpc1_snoop { ++ status = "okay"; ++ snoop-ports = <0x80>, <0x81>; ++}; ++ ++&lpc1_uart_routing { ++ status = "okay"; ++}; ++ ++&video0 { ++ status = "okay"; ++ memory-region = <&video_engine_memory0>; ++}; ++ ++&video1 { ++ status = "okay"; ++ memory-region = <&video_engine_memory1>; ++}; ++ ++&disp_intf { ++ status = "okay"; ++}; ++ ++&rtc { ++ status = "okay"; ++}; ++ ++&rsss { ++ status = "okay"; ++}; ++ ++&ecdsa { ++ status = "okay"; ++}; ++ ++&hace { ++ status = "okay"; ++}; ++ ++#if PCIE0_EP ++&bmc_dev0 { ++ status = "okay"; ++ memory-region = <&bmc_dev0_memory>; ++}; ++ ++&xdma0 { ++ status = "okay"; ++ memory-region = <&xdma_memory0>; ++}; ++ ++&pcie_vuart0 { ++ port = <0x3f8>; ++ sirq = <4>; ++ sirq-polarity = <0>; ++ ++ status = "okay"; ++}; ++ ++&pcie_vuart1 { ++ port = <0x2f8>; ++ sirq = <3>; ++ sirq-polarity = <0>; ++ ++ status = "okay"; ++}; ++ ++&pcie_lpc0_kcs0 { ++ status = "okay"; ++ kcs-io-addr = <0x3a0>; ++ kcs-channel = <8>; ++}; ++ ++&pcie_lpc0_kcs1 { ++ status = "okay"; ++ kcs-io-addr = <0x3a8>; ++ kcs-channel = <9>; ++}; ++ ++&pcie_lpc0_kcs2 { ++ status = "okay"; ++ kcs-io-addr = <0x3a2>; ++ kcs-channel = <10>; ++}; ++ ++&pcie_lpc0_kcs3 { ++ status = "okay"; ++ kcs-io-addr = <0x3a4>; ++ kcs-channel = <11>; ++}; ++ ++&pcie_lpc0_ibt { ++ status = "okay"; ++ bt-channel = <2>; ++}; ++ ++&pcie0_mmbi0 { ++ status = "okay"; ++ memory-region = <&pcie0_mmbi0_memory>; ++ ++ mmbi-bmc-int-value = /bits/ 8 <0x00>; ++ mmbi-bmc-int-offset = <0x100000>; ++}; ++#else ++&pcie0 { ++ status = "okay"; ++}; ++#endif ++ ++#if PCIE1_EP ++&bmc_dev1 { ++ status = "okay"; ++ memory-region = <&bmc_dev1_memory>; ++}; ++ ++&xdma1 { ++ status = "okay"; ++ memory-region = <&xdma_memory1>; ++}; ++ ++&pcie_vuart2 { ++ port = <0x3f8>; ++ sirq = <4>; ++ sirq-polarity = <0>; ++ ++ status = "okay"; ++}; ++ ++&pcie_vuart3 { ++ port = <0x2f8>; ++ sirq = <3>; ++ sirq-polarity = <0>; ++ ++ status = "okay"; ++}; ++ ++&pcie_lpc1_kcs0 { ++ status = "okay"; ++ kcs-io-addr = <0x3a0>; ++ kcs-channel = <12>; ++}; ++ ++&pcie_lpc1_kcs1 { ++ status = "okay"; ++ kcs-io-addr = <0x3a8>; ++ kcs-channel = <13>; ++}; ++ ++&pcie_lpc1_kcs2 { ++ status = "okay"; ++ kcs-io-addr = <0x3a2>; ++ kcs-channel = <14>; ++}; ++ ++&pcie_lpc1_kcs3 { ++ status = 
"okay"; ++ kcs-io-addr = <0x3a4>; ++ kcs-channel = <15>; ++}; ++ ++&pcie_lpc1_ibt { ++ status = "okay"; ++ bt-channel = <3>; ++}; ++ ++&pcie1_mmbi4 { ++ status = "okay"; ++ memory-region = <&pcie1_mmbi4_memory>; ++ ++ mmbi-bmc-int-value = /bits/ 8 <0x44>; ++ mmbi-bmc-int-offset = <0x100400>; ++}; ++#else ++&pcie1 { ++ status = "okay"; ++}; ++#endif ++ ++#if 0 ++&i3c0 { ++ status = "okay"; ++}; ++ ++&jtag0 { ++ status = "okay"; ++}; ++#endif ++ ++&sdio_controller { ++ status = "okay"; ++ mmc-hs200-1_8v; ++ ++ vcc_sdhci0: regulator-vcc-sdhci0 { ++ compatible = "regulator-fixed"; ++ regulator-name = "SDHCI0 Vcc"; ++ regulator-min-microvolt = <3300000>; ++ regulator-max-microvolt = <3300000>; ++ gpios = <&gpio0 ASPEED_GPIO(G, 6) GPIO_ACTIVE_HIGH>; ++ enable-active-high; ++ }; ++ ++ vccq_sdhci0: regulator-vccq-sdhci0 { ++ compatible = "regulator-gpio"; ++ regulator-name = "SDHCI0 VccQ"; ++ regulator-min-microvolt = <1800000>; ++ regulator-max-microvolt = <3300000>; ++ gpios = <&gpio0 ASPEED_GPIO(G, 7) GPIO_ACTIVE_HIGH>; ++ gpios-states = <1>; ++ states = <3300000 1>, ++ <1800000 0>; ++ }; ++ ++}; ++ ++&sdhci { ++ status = "okay"; ++ bus-width = <4>; ++ max-frequency = <100000000>; ++ /* DDR50 bits in CAPA2 are not supported */ ++ sdhci-caps-mask = <0x6 0x0>; ++ sdhci-drive-type = /bits/ 8 <3>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_sd_default>; ++ vmmc-supply = <&vcc_sdhci0>; ++ vqmmc-supply = <&vccq_sdhci0>; ++ sd-uhs-sdr104; /* enable sdr104 to execute tuning */ ++}; ++ ++&i2c0 { ++ status = "okay"; ++ clock-frequency = <1000000>; ++ multi-master; ++}; ++ ++&i2c1 { ++ status = "okay"; ++ clock-frequency = <1000000>; ++ multi-master; ++}; ++ ++&i2c2 { ++ status = "okay"; ++ clock-frequency = <1000000>; ++ multi-master; ++}; ++ ++&i2c3 { ++ status = "okay"; ++ clock-frequency = <1000000>; ++ multi-master; ++}; ++ ++&i2c4 { ++ status = "okay"; ++ clock-frequency = <1000000>; ++ multi-master; ++}; ++ ++&i2c5 { ++ status = "okay"; ++ clock-frequency = <1000000>; ++ multi-master; ++}; ++ ++&i2c6 { ++ status = "okay"; ++ clock-frequency = <1000000>; ++ multi-master; ++}; ++ ++&i2c7 { ++ status = "okay"; ++ clock-frequency = <1000000>; ++ multi-master; ++}; ++ ++&i2c8 { ++ status = "okay"; ++ clock-frequency = <1000000>; ++ multi-master; ++}; ++ ++&i2c9 { ++ status = "okay"; ++ clock-frequency = <1000000>; ++ multi-master; ++}; ++ ++&i2c10 { ++ status = "okay"; ++ clock-frequency = <1000000>; ++ multi-master; ++}; ++ ++&i2c11 { ++ status = "okay"; ++ clock-frequency = <1000000>; ++ multi-master; ++}; ++ ++&i2c12 { ++ status = "okay"; ++ clock-frequency = <1000000>; ++ multi-master; ++}; ++ ++&i2c13 { ++ status = "okay"; ++ clock-frequency = <1000000>; ++ multi-master; ++}; ++ ++&i2c14 { ++ status = "okay"; ++ clock-frequency = <1000000>; ++ multi-master; ++}; ++ ++&i2c15 { ++ status = "okay"; ++ clock-frequency = <1000000>; ++ multi-master; ++}; ++ ++#if 0 ++&ehci0 { ++ status = "okay"; ++}; ++ ++&ehci1 { ++ status = "okay"; ++}; ++ ++&uhci0 { ++ status = "okay"; ++}; ++ ++#endif ++ ++#if 1 ++&uphy3a { ++ status = "okay"; ++}; ++ ++&uphy3b { ++ status = "okay"; ++}; ++#endif ++ ++#if 0 ++&xhci0 { ++ status = "okay"; ++}; ++ ++&xhci1 { ++ status = "okay"; ++}; ++#endif ++ ++&vhuba0 { ++ status = "okay"; ++ pinctrl-0 = <&pinctrl_usb2ahpd0_default>; ++}; ++ ++&usb3ahp { ++ status = "okay"; ++ pinctrl-0 = <&pinctrl_usb3axhp_default &pinctrl_usb2axhp_default>; ++}; ++ ++&usb3bhp { ++ status = "okay"; ++}; ++ ++&uphy2b { ++ status = "okay"; ++}; ++ ++&vhubb1 { ++ status = "okay"; ++}; ++ 
++&vhubc { ++ status = "okay"; ++}; ++ ++&ehci3 { ++ status = "okay"; ++}; ++ ++&uhci1 { ++ status = "okay"; ++}; ++ ++&wdt0 { ++ status = "okay"; ++}; ++ ++&wdt1 { ++ status = "okay"; ++}; ++ ++&otp { ++ status = "okay"; ++}; +diff --git a/arch/arm64/boot/dts/aspeed/ast2700-evbi2c.dts b/arch/arm64/boot/dts/aspeed/ast2700-evbi2c.dts +--- a/arch/arm64/boot/dts/aspeed/ast2700-evbi2c.dts 1970-01-01 00:00:00.000000000 +0000 ++++ b/arch/arm64/boot/dts/aspeed/ast2700-evbi2c.dts 2025-12-23 10:16:06.862271766 +0000 +@@ -0,0 +1,1179 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++ ++/dts-v1/; ++ ++#include "aspeed-g7.dtsi" ++#include ++#include ++#define PCIE0_EP 1 // 1: EP, 0: RC ++#define PCIE1_EP 1 // 1: EP, 0: RC ++ ++/ { ++ model = "AST2700-EVB"; ++ compatible = "aspeed,ast2700-evb", "aspeed,ast2700"; ++ ++ chosen { ++ stdout-path = "serial12:115200n8"; ++ }; ++ ++ memory@400000000 { ++ device_type = "memory"; ++ reg = <0x4 0x00000000 0x0 0x40000000>; ++ }; ++ ++ reserved-memory { ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ranges; ++ ++ #include "ast2700-reserved-mem.dtsi" ++ ++ video_engine_memory0: video0 { ++ size = <0x0 0x02c00000>; ++ alignment = <0x0 0x00100000>; ++ compatible = "shared-dma-pool"; ++ reusable; ++ }; ++ ++ video_engine_memory1: video1{ ++ size = <0x0 0x02c00000>; ++ alignment = <0x0 0x00100000>; ++ compatible = "shared-dma-pool"; ++ reusable; ++ }; ++ ++ gfx_memory: framebuffer { ++ size = <0x0 0x01000000>; ++ alignment = <0x0 0x01000000>; ++ compatible = "shared-dma-pool"; ++ reusable; ++ }; ++ ++ xdma_memory0: xdma0 { ++ size = <0x0 0x01000000>; ++ alignment = <0x0 0x01000000>; ++ compatible = "shared-dma-pool"; ++ no-map; ++ }; ++ ++ xdma_memory1: xdma1 { ++ size = <0x0 0x01000000>; ++ alignment = <0x0 0x01000000>; ++ compatible = "shared-dma-pool"; ++ no-map; ++ }; ++ }; ++ ++ fan0: pwm-fan0 { ++ compatible = "pwm-fan"; ++ pwms = <&pwm_tach 0 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ fan1: pwm-fan1 { ++ compatible = "pwm-fan"; ++ pwms = <&pwm_tach 1 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ fan2: pwm-fan2 { ++ compatible = "pwm-fan"; ++ pwms = <&pwm_tach 2 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ fan3: pwm-fan3 { ++ compatible = "pwm-fan"; ++ pwms = <&pwm_tach 3 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ fan4: pwm-fan4 { ++ compatible = "pwm-fan"; ++ pwms = <&pwm_tach 4 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ fan5: pwm-fan5 { ++ compatible = "pwm-fan"; ++ pwms = <&pwm_tach 5 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ fan6: pwm-fan6 { ++ compatible = "pwm-fan"; ++ pwms = <&pwm_tach 6 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ fan7: pwm-fan7 { ++ compatible = "pwm-fan"; ++ pwms = <&pwm_tach 7 40000 0>; /* Target freq:25 
kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ fan8: pwm-fan8 { ++ compatible = "pwm-fan"; ++ pwms = <&pwm_tach 8 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ iio-hwmon { ++ compatible = "iio-hwmon"; ++ status = "okay"; ++ io-channels = <&adc0 0>, <&adc0 1>, <&adc0 2>, <&adc0 3>, ++ <&adc0 4>, <&adc0 5>, <&adc0 6>, <&adc0 7>, ++ <&adc1 0>, <&adc1 1>, <&adc1 2>, <&adc1 3>, ++ <&adc1 4>, <&adc1 5>, <&adc1 6>, <&adc1 7>; ++ }; ++}; ++ ++&pwm_tach { ++ status = "okay"; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_pwm0_default &pinctrl_pwm1_default ++ &pinctrl_pwm2_default &pinctrl_pwm3_default ++ &pinctrl_pwm4_default &pinctrl_pwm5_default ++ &pinctrl_pwm6_default &pinctrl_pwm7_default ++ &pinctrl_pwm8_default ++ &pinctrl_tach0_default &pinctrl_tach1_default ++ &pinctrl_tach2_default &pinctrl_tach3_default ++ &pinctrl_tach4_default &pinctrl_tach5_default ++ &pinctrl_tach6_default &pinctrl_tach7_default ++ &pinctrl_tach8_default &pinctrl_tach9_default ++ &pinctrl_tach10_default &pinctrl_tach11_default ++ &pinctrl_tach12_default &pinctrl_tach13_default ++ &pinctrl_tach14_default &pinctrl_tach15_default>; ++ fan-0 { ++ tach-ch = /bits/ 8 <0x0>; ++ }; ++ fan-1 { ++ tach-ch = /bits/ 8 <0x1>; ++ }; ++ fan-2 { ++ tach-ch = /bits/ 8 <0x2>; ++ }; ++ fan-3 { ++ tach-ch = /bits/ 8 <0x3>; ++ }; ++ fan-4 { ++ tach-ch = /bits/ 8 <0x4>; ++ }; ++ fan-5 { ++ tach-ch = /bits/ 8 <0x5>; ++ }; ++ fan-6 { ++ tach-ch = /bits/ 8 <0x6>; ++ }; ++ fan-7 { ++ tach-ch = /bits/ 8 <0x7>; ++ }; ++ fan-8 { ++ tach-ch = /bits/ 8 <0x8>; ++ }; ++ fan-9 { ++ tach-ch = /bits/ 8 <0x9>; ++ }; ++ fan-10 { ++ tach-ch = /bits/ 8 <0xA>; ++ }; ++ fan-11 { ++ tach-ch = /bits/ 8 <0xB>; ++ }; ++ fan-12 { ++ tach-ch = /bits/ 8 <0xC>; ++ }; ++ fan-13 { ++ tach-ch = /bits/ 8 <0xD>; ++ }; ++ fan-14 { ++ tach-ch = /bits/ 8 <0xE>; ++ }; ++ fan-15 { ++ tach-ch = /bits/ 8 <0xF>; ++ }; ++}; ++ ++&mctp0 { ++ status = "okay"; ++ memory-region = <&mctp0_reserved>; ++}; ++ ++&mctp1 { ++ status = "okay"; ++ memory-region = <&mctp1_reserved>; ++}; ++ ++&mctp2 { ++ status = "okay"; ++ memory-region = <&mctp2_reserved>; ++}; ++ ++&sgpiom0 { ++ status = "okay"; ++}; ++ ++&sgpiom1 { ++ status = "okay"; ++}; ++ ++&jtag1 { ++ status = "okay"; ++}; ++ ++&adc0 { ++ aspeed,int-vref-microvolt = <2500000>; ++ status = "okay"; ++ ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_adc0_default &pinctrl_adc1_default ++ &pinctrl_adc2_default &pinctrl_adc3_default ++ &pinctrl_adc4_default &pinctrl_adc5_default ++ &pinctrl_adc6_default &pinctrl_adc7_default>; ++}; ++ ++&adc1 { ++ aspeed,int-vref-microvolt = <2500000>; ++ status = "okay"; ++ ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_adc8_default &pinctrl_adc9_default ++ &pinctrl_adc10_default &pinctrl_adc11_default ++ &pinctrl_adc12_default &pinctrl_adc13_default ++ &pinctrl_adc14_default &pinctrl_adc15_default>; ++}; ++ ++&pinctrl1 { ++ pinctrl_i3c0_3_hv_voltage: i3chv-voltage { ++ pins = "U25"; ++ power-source = <1800>; ++ }; ++ ++ pinctrl_i3c0_driving: i3c0-driving { ++ pins = "U25", "U26"; ++ drive-strength = <2>; ++ }; ++ ++ pinctrl_i3c1_driving: i3c1-driving { ++ pins = "Y26", "AA24"; ++ drive-strength = <2>; ++ }; ++ ++ pinctrl_i3c2_driving: i3c2-driving { ++ pins = "R25", "AA26"; ++ drive-strength = <2>; ++ }; ++ ++ pinctrl_i3c3_driving: i3c3-driving { ++ pins = "R26", "Y25"; ++ 
drive-strength = <2>; ++ }; ++ ++ pinctrl_i3c12_15_hv_voltage: i3chv-voltage { ++ pins = "W25"; ++ power-source = <1800>; ++ }; ++ ++ pinctrl_i3c12_driving: i3c12-driving { ++ pins = "W25", "Y23"; ++ drive-strength = <2>; ++ }; ++ ++ pinctrl_i3c13_driving: i3c13-driving { ++ pins = "Y24", "W21"; ++ drive-strength = <2>; ++ }; ++ ++ pinctrl_i3c14_driving: i3c14-driving { ++ pins = "AA23", "AC22"; ++ drive-strength = <2>; ++ }; ++ ++ pinctrl_i3c15_driving: i3c15-driving { ++ pins = "AB22", "Y21"; ++ drive-strength = <2>; ++ }; ++ ++ pinctrl_rgmii0_driving: rgmii0-driving { ++ pins = "C20", "C19", "A8", "R14", "A7", "P14", ++ "D20", "A6", "B6", "N14", "B7", "B8"; ++ drive-strength = <1>; ++ }; ++ ++ pinctrl_rgmii1_driving: rgmii1-driving { ++ pins = "D19", "C19", "D15", "B12", "B10", "P13", ++ "C18", "C6", "C7", "D7", "N13", "C8"; ++ drive-strength = <1>; ++ }; ++}; ++ ++&gpio0 { ++ pinctrl-0 = <&pinctrl_i3c0_3_hv_voltage &pinctrl_i3c12_15_hv_voltage ++ &pinctrl_i3c0_driving &pinctrl_i3c1_driving ++ &pinctrl_i3c2_driving &pinctrl_i3c3_driving ++ &pinctrl_i3c12_driving &pinctrl_i3c13_driving ++ &pinctrl_i3c14_driving &pinctrl_i3c15_driving>; ++ pinctrl-names = "default"; ++}; ++ ++&i3c0 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x06010000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c1 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c2 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x06012000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c3 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c4 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x06014000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c5 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c6 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x06016000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c7 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c8 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x06018000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c9 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c10 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x0601A000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c11 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c12 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x0601C000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c13 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c14 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x0601E000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c15 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&uart12 { ++ status = "okay"; ++}; ++ ++&fmc { ++ status = "okay"; ++ pinctrl-0 = <&pinctrl_fwspi_quad_default>; ++ pinctrl-names = "default"; ++ ++ flash@0 { ++ status = "okay"; ++ m25p,fast-read; ++ label = "bmc"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <4>; ++ spi-rx-bus-width = <4>; ++#include "aspeed-evb-flash-layout-128.dtsi" ++ }; ++ ++ flash@1 { ++ status = "okay"; ++ m25p,fast-read; ++ label = "fmc0:1"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <4>; ++ spi-rx-bus-width = <4>; ++ }; ++ ++ flash@2 { ++ status = "disabled"; ++ m25p,fast-read; ++ label = "fmc0:2"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <4>; ++ spi-rx-bus-width = <4>; ++ }; ++}; ++ ++&spi0 { ++ status = "okay"; ++ pinctrl-0 = <&pinctrl_spi0_default &pinctrl_spi0_cs1_default>; 
++ pinctrl-names = "default"; ++ ++ flash@0 { ++ status = "okay"; ++ m25p,fast-read; ++ label = "spi0:0"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <2>; ++ spi-rx-bus-width = <2>; ++ }; ++ ++ flash@1 { ++ status = "disabled"; ++ m25p,fast-read; ++ label = "spi0:1"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <2>; ++ spi-rx-bus-width = <2>; ++ }; ++}; ++ ++&spi1 { ++ status = "okay"; ++ pinctrl-0 = <&pinctrl_spi1_default &pinctrl_spi1_cs1_default>; ++ pinctrl-names = "default"; ++ ++ flash@0 { ++ status = "okay"; ++ m25p,fast-read; ++ label = "spi1:0"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <2>; ++ spi-rx-bus-width = <2>; ++ }; ++ ++ flash@1 { ++ status = "disabled"; ++ m25p,fast-read; ++ label = "spi1:1"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <2>; ++ spi-rx-bus-width = <2>; ++ }; ++}; ++ ++#if 1 ++&spi2 { ++ compatible = "aspeed,ast2700-spi-txrx"; ++ pinctrl-0 = <&pinctrl_spi2_default>; ++ pinctrl-names = "default"; ++ status = "okay"; ++ ++ spi-aspeed-full-duplex; ++ ++ tpm0: tpmdev@0 { ++ compatible = "tcg,tpm_tis-spi"; ++ spi-max-frequency = <34000000>; ++ reg = <0>; ++ status = "okay"; ++ }; ++}; ++#else ++&spi2 { ++ compatible = "aspeed,ast2700-spi"; ++ pinctrl-0 = <&pinctrl_spi2_default &pinctrl_spi2_cs1_default>; ++ pinctrl-names = "default"; ++ status = "okay"; ++ ++ flash@0 { ++ status = "okay"; ++ reg = < 0 >; ++ compatible = "jedec,spi-nor"; ++ m25p,fast-read; ++ label = "spi2:0"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <2>; ++ spi-rx-bus-width = <2>; ++ }; ++ ++ flash@1 { ++ status = "okay"; ++ reg = < 1 >; ++ compatible = "jedec,spi-nor"; ++ m25p,fast-read; ++ label = "spi2:1"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <2>; ++ spi-rx-bus-width = <2>; ++ }; ++}; ++#endif ++ ++&emmc_controller { ++ status = "okay"; ++ mmc-hs200-1_8v; ++}; ++ ++&emmc { ++ status = "okay"; ++#if 1 ++ bus-width = <4>; ++#else ++ bus-width = <8>; ++ pinctrl-0 = <&pinctrl_emmc_default ++ &pinctrl_emmcg8_default>; ++#endif ++ non-removable; ++ max-frequency = <200000000>; ++}; ++ ++&ufs_controller { ++ status = "okay"; ++}; ++ ++&ufs { ++ status = "okay"; ++ lanes-per-direction = <2>; ++ ref-clk-freq = <26000000>; ++}; ++ ++&chassis { ++ status = "okay"; ++}; ++ ++&mdio0 { ++ status = "okay"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ ethphy0: ethernet-phy@0 { ++ compatible = "ethernet-phy-ieee802.3-c22"; ++ reg = <0>; ++ }; ++}; ++ ++&mdio1 { ++ status = "okay"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ ethphy1: ethernet-phy@0 { ++ compatible = "ethernet-phy-ieee802.3-c22"; ++ reg = <0>; ++ }; ++}; ++ ++&mac0 { ++ status = "okay"; ++ ++ phy-mode = "rgmii"; ++ phy-handle = <ðphy0>; ++ ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_rgmii0_default &pinctrl_rgmii0_driving>; ++}; ++ ++&mac1 { ++ status = "okay"; ++ ++ phy-mode = "rgmii"; ++ phy-handle = <ðphy1>; ++ ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_rgmii1_default &pinctrl_rgmii1_driving>; ++}; ++ ++#if 0 ++&mdio2 { ++ status = "okay"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ ethphy2: ethernet-phy@0 { ++ compatible = "ethernet-phy-ieee802.3-c22"; ++ reg = <0>; ++ }; ++}; ++ ++&sgmii { ++ status = "okay"; ++}; ++ ++&mac2 { ++ status = "okay"; ++ ++ phy-mode = "sgmii"; ++ phy-handle = <ðphy2>; ++}; ++ ++&pcie2 { ++ status = "okay"; ++}; ++#endif ++ ++&syscon1 { ++ mac0-clk-delay = <0x33 0x30 ++ 0x10 0x10 ++ 0x10 0x10>; ++ mac1-clk-delay = <0x31 0x31 ++ 0x10 0x10 ++ 0x10 0x10>; ++ assigned-clocks = 
<&syscon1 SCU1_CLK_MACHCLK>, ++ <&syscon1 SCU1_CLK_RGMII>, ++ <&syscon1 SCU1_CLK_RMII>; ++ assigned-clock-rates = <200000000>, <125000000>, <50000000>; ++}; ++ ++&espi0 { ++ status = "okay"; ++ perif-dma-mode; ++ perif-mmbi-enable; ++ perif-mmbi-src-addr = <0x0 0xa8000000>; ++ perif-mmbi-tgt-memory = <&espi0_mmbi_memory>; ++ perif-mmbi-instance-num = <0x1>; ++ perif-mcyc-enable; ++ perif-mcyc-src-addr = <0x0 0x98000000>; ++ perif-mcyc-size = <0x0 0x10000>; ++ oob-dma-mode; ++ flash-dma-mode; ++}; ++ ++&lpc0_kcs0 { ++ status = "okay"; ++ kcs-io-addr = <0xca0>; ++ kcs-channel = <0>; ++}; ++ ++&lpc0_kcs1 { ++ status = "okay"; ++ kcs-io-addr = <0xca8>; ++ kcs-channel = <1>; ++}; ++ ++&lpc0_kcs2 { ++ status = "okay"; ++ kcs-io-addr = <0xca2>; ++ kcs-channel = <2>; ++}; ++ ++&lpc0_kcs3 { ++ status = "okay"; ++ kcs-io-addr = <0xca4>; ++ kcs-channel = <3>; ++}; ++ ++&lpc0_ibt { ++ status = "okay"; ++}; ++ ++&lpc0_mbox { ++ status = "okay"; ++}; ++ ++&lpc0_snoop { ++ status = "okay"; ++ snoop-ports = <0x80>, <0x81>; ++}; ++ ++&lpc0_uart_routing { ++ status = "okay"; ++}; ++ ++&lpc1_kcs0 { ++ status = "okay"; ++ kcs-io-addr = <0xca0>; ++ kcs-channel = <4>; ++}; ++ ++&lpc1_kcs1 { ++ status = "okay"; ++ kcs-io-addr = <0xca8>; ++ kcs-channel = <5>; ++}; ++ ++&lpc1_kcs2 { ++ status = "okay"; ++ kcs-io-addr = <0xca2>; ++ kcs-channel = <6>; ++}; ++ ++&lpc1_kcs3 { ++ status = "okay"; ++ kcs-io-addr = <0xca4>; ++ kcs-channel = <7>; ++}; ++ ++&lpc1_ibt { ++ status = "okay"; ++}; ++ ++&lpc1_mbox { ++ status = "okay"; ++}; ++ ++&lpc1_snoop { ++ status = "okay"; ++ snoop-ports = <0x80>, <0x81>; ++}; ++ ++&lpc1_uart_routing { ++ status = "okay"; ++}; ++ ++&video0 { ++ status = "okay"; ++ memory-region = <&video_engine_memory0>; ++}; ++ ++&video1 { ++ status = "okay"; ++ memory-region = <&video_engine_memory1>; ++}; ++ ++&disp_intf { ++ status = "okay"; ++}; ++ ++&rtc { ++ status = "okay"; ++}; ++ ++&rsss { ++ status = "okay"; ++}; ++ ++&ecdsa { ++ status = "okay"; ++}; ++ ++&hace { ++ status = "okay"; ++}; ++ ++#if PCIE0_EP ++&bmc_dev0 { ++ status = "okay"; ++ memory-region = <&bmc_dev0_memory>; ++}; ++ ++&xdma0 { ++ status = "okay"; ++ memory-region = <&xdma_memory0>; ++}; ++ ++&pcie_vuart0 { ++ port = <0x3f8>; ++ sirq = <4>; ++ sirq-polarity = <0>; ++ ++ status = "okay"; ++}; ++ ++&pcie_vuart1 { ++ port = <0x2f8>; ++ sirq = <3>; ++ sirq-polarity = <0>; ++ ++ status = "okay"; ++}; ++ ++&pcie_lpc0_kcs0 { ++ status = "okay"; ++ kcs-io-addr = <0x3a0>; ++ kcs-channel = <8>; ++}; ++ ++&pcie_lpc0_kcs1 { ++ status = "okay"; ++ kcs-io-addr = <0x3a8>; ++ kcs-channel = <9>; ++}; ++ ++&pcie_lpc0_kcs2 { ++ status = "okay"; ++ kcs-io-addr = <0x3a2>; ++ kcs-channel = <10>; ++}; ++ ++&pcie_lpc0_kcs3 { ++ status = "okay"; ++ kcs-io-addr = <0x3a4>; ++ kcs-channel = <11>; ++}; ++ ++&pcie_lpc0_ibt { ++ status = "okay"; ++ bt-channel = <2>; ++}; ++ ++&pcie0_mmbi0 { ++ status = "okay"; ++ memory-region = <&pcie0_mmbi0_memory>; ++ ++ mmbi-bmc-int-value = /bits/ 8 <0x00>; ++ mmbi-bmc-int-offset = <0x100000>; ++}; ++#else ++&pcie0 { ++ status = "okay"; ++}; ++#endif ++ ++#if PCIE1_EP ++&bmc_dev1 { ++ status = "okay"; ++ memory-region = <&bmc_dev1_memory>; ++}; ++ ++&xdma1 { ++ status = "okay"; ++ memory-region = <&xdma_memory1>; ++}; ++ ++&pcie_vuart2 { ++ port = <0x3f8>; ++ sirq = <4>; ++ sirq-polarity = <0>; ++ ++ status = "okay"; ++}; ++ ++&pcie_vuart3 { ++ port = <0x2f8>; ++ sirq = <3>; ++ sirq-polarity = <0>; ++ ++ status = "okay"; ++}; ++ ++&pcie_lpc1_kcs0 { ++ status = "okay"; ++ kcs-io-addr = <0x3a0>; ++ kcs-channel = <12>; 
++}; ++ ++&pcie_lpc1_kcs1 { ++ status = "okay"; ++ kcs-io-addr = <0x3a8>; ++ kcs-channel = <13>; ++}; ++ ++&pcie_lpc1_kcs2 { ++ status = "okay"; ++ kcs-io-addr = <0x3a2>; ++ kcs-channel = <14>; ++}; ++ ++&pcie_lpc1_kcs3 { ++ status = "okay"; ++ kcs-io-addr = <0x3a4>; ++ kcs-channel = <15>; ++}; ++ ++&pcie_lpc1_ibt { ++ status = "okay"; ++ bt-channel = <3>; ++}; ++ ++&pcie1_mmbi4 { ++ status = "okay"; ++ memory-region = <&pcie1_mmbi4_memory>; ++ ++ mmbi-bmc-int-value = /bits/ 8 <0x44>; ++ mmbi-bmc-int-offset = <0x100400>; ++}; ++#else ++&pcie1 { ++ status = "okay"; ++}; ++#endif ++ ++#if 0 ++&i3c0 { ++ status = "okay"; ++}; ++ ++&jtag0 { ++ status = "okay"; ++}; ++#endif ++ ++&sdio_controller { ++ status = "okay"; ++ mmc-hs200-1_8v; ++ ++ vcc_sdhci0: regulator-vcc-sdhci0 { ++ compatible = "regulator-fixed"; ++ regulator-name = "SDHCI0 Vcc"; ++ regulator-min-microvolt = <3300000>; ++ regulator-max-microvolt = <3300000>; ++ gpios = <&gpio0 ASPEED_GPIO(G, 6) GPIO_ACTIVE_HIGH>; ++ enable-active-high; ++ }; ++ ++ vccq_sdhci0: regulator-vccq-sdhci0 { ++ compatible = "regulator-gpio"; ++ regulator-name = "SDHCI0 VccQ"; ++ regulator-min-microvolt = <1800000>; ++ regulator-max-microvolt = <3300000>; ++ gpios = <&gpio0 ASPEED_GPIO(G, 7) GPIO_ACTIVE_HIGH>; ++ gpios-states = <1>; ++ states = <3300000 1>, ++ <1800000 0>; ++ }; ++ ++}; ++ ++&sdhci { ++ status = "okay"; ++ bus-width = <4>; ++ max-frequency = <100000000>; ++ /* DDR50 bits in CAPA2 are not supported */ ++ sdhci-caps-mask = <0x6 0x0>; ++ sdhci-drive-type = /bits/ 8 <3>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_sd_default>; ++ vmmc-supply = <&vcc_sdhci0>; ++ vqmmc-supply = <&vccq_sdhci0>; ++ sd-uhs-sdr104; /* enable sdr104 to execute tuning */ ++}; ++ ++&i2c0 { ++ status = "okay"; ++ clock-frequency = <1000000>; ++ multi-master; ++}; ++ ++&i2c1 { ++ status = "okay"; ++ clock-frequency = <1000000>; ++ multi-master; ++}; ++ ++&i2c2 { ++ status = "okay"; ++ clock-frequency = <1000000>; ++ multi-master; ++}; ++ ++&i2c3 { ++ status = "okay"; ++ clock-frequency = <1000000>; ++ multi-master; ++}; ++ ++&i2c4 { ++ status = "okay"; ++ clock-frequency = <1000000>; ++ multi-master; ++}; ++ ++&i2c5 { ++ status = "okay"; ++ clock-frequency = <1000000>; ++ multi-master; ++}; ++ ++&i2c6 { ++ status = "okay"; ++ clock-frequency = <1000000>; ++ multi-master; ++}; ++ ++&i2c7 { ++ status = "okay"; ++ clock-frequency = <1000000>; ++ multi-master; ++}; ++ ++&i2c8 { ++ status = "okay"; ++ clock-frequency = <1000000>; ++ multi-master; ++}; ++ ++&i2c9 { ++ status = "okay"; ++ clock-frequency = <1000000>; ++ multi-master; ++}; ++ ++&i2c10 { ++ status = "okay"; ++ clock-frequency = <1000000>; ++ multi-master; ++}; ++ ++&i2c11 { ++ status = "okay"; ++ clock-frequency = <1000000>; ++ multi-master; ++}; ++ ++&i2c12 { ++ status = "okay"; ++ clock-frequency = <1000000>; ++ multi-master; ++}; ++ ++&i2c13 { ++ status = "okay"; ++ clock-frequency = <1000000>; ++ multi-master; ++}; ++ ++&i2c14 { ++ status = "okay"; ++ clock-frequency = <1000000>; ++ multi-master; ++}; ++ ++&i2c15 { ++ status = "okay"; ++ clock-frequency = <1000000>; ++ multi-master; ++}; ++ ++#if 0 ++&ehci0 { ++ status = "okay"; ++}; ++ ++&ehci1 { ++ status = "okay"; ++}; ++ ++&uhci0 { ++ status = "okay"; ++}; ++ ++#endif ++ ++#if 1 ++&uphy3a { ++ status = "okay"; ++}; ++ ++&uphy3b { ++ status = "okay"; ++}; ++#endif ++ ++#if 0 ++&xhci0 { ++ status = "okay"; ++}; ++ ++&xhci1 { ++ status = "okay"; ++}; ++#endif ++ ++&vhuba0 { ++ status = "okay"; ++ pinctrl-0 = <&pinctrl_usb2ahpd0_default>; ++}; ++ 
++&usb3ahp { ++ status = "okay"; ++ pinctrl-0 = <&pinctrl_usb3axhp_default &pinctrl_usb2axhp_default>; ++}; ++ ++&usb3bhp { ++ status = "okay"; ++}; ++ ++&uphy2b { ++ status = "okay"; ++}; ++ ++&vhubb1 { ++ status = "okay"; ++}; ++ ++&vhubc { ++ status = "okay"; ++}; ++ ++&ehci3 { ++ status = "okay"; ++}; ++ ++&uhci1 { ++ status = "okay"; ++}; ++ ++&wdt0 { ++ status = "okay"; ++}; ++ ++&wdt1 { ++ status = "okay"; ++}; ++ ++&otp { ++ status = "okay"; ++}; +diff --git a/arch/arm64/boot/dts/aspeed/ast2700-fpga.dts b/arch/arm64/boot/dts/aspeed/ast2700-fpga.dts +--- a/arch/arm64/boot/dts/aspeed/ast2700-fpga.dts 1970-01-01 00:00:00.000000000 +0000 ++++ b/arch/arm64/boot/dts/aspeed/ast2700-fpga.dts 2025-12-23 10:16:06.862271766 +0000 +@@ -0,0 +1,1132 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++ ++/dts-v1/; ++ ++#include "aspeed-g7.dtsi" ++#include ++#include ++ ++#define PCIE0_EP 1 // 1: EP, 0: RC ++#define PCIE1_EP 1 // 1: EP, 0: RC ++#define PCIE2_RC 1 // 1: RC, 0: SGMII ++ ++/ { ++ model = "AST2700-FPGA"; ++ compatible = "aspeed,ast2700-evb", "aspeed,ast2700"; ++ ++ chosen { ++ stdout-path = "serial12:115200n8"; ++ }; ++ ++ memory@400000000 { ++ device_type = "memory"; ++ reg = <0x4 0x00000000 0x0 0x40000000>; ++ }; ++ ++ reserved-memory { ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ranges; ++ ++ #include "ast2700-reserved-mem.dtsi" ++ ++ video_engine_memory0: video0 { ++ size = <0x0 0x02c00000>; ++ alignment = <0x0 0x00100000>; ++ compatible = "shared-dma-pool"; ++ reusable; ++ }; ++ ++ video_engine_memory1: video1{ ++ size = <0x0 0x02c00000>; ++ alignment = <0x0 0x00100000>; ++ compatible = "shared-dma-pool"; ++ reusable; ++ }; ++ ++ gfx_memory: framebuffer { ++ size = <0x0 0x01000000>; ++ alignment = <0x0 0x01000000>; ++ compatible = "shared-dma-pool"; ++ reusable; ++ }; ++ }; ++ ++ fan0: pwm-fan0 { ++ compatible = "pwm-fan"; ++ pwms = <&pwm_tach 0 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ fan1: pwm-fan1 { ++ compatible = "pwm-fan"; ++ pwms = <&pwm_tach 1 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ fan2: pwm-fan2 { ++ compatible = "pwm-fan"; ++ pwms = <&pwm_tach 2 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ fan3: pwm-fan3 { ++ compatible = "pwm-fan"; ++ pwms = <&pwm_tach 3 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ fan4: pwm-fan4 { ++ compatible = "pwm-fan"; ++ pwms = <&pwm_tach 4 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ fan5: pwm-fan5 { ++ compatible = "pwm-fan"; ++ pwms = <&pwm_tach 5 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ fan6: pwm-fan6 { ++ compatible = "pwm-fan"; ++ pwms = <&pwm_tach 6 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ fan7: pwm-fan7 { ++ compatible = "pwm-fan"; ++ pwms = <&pwm_tach 7 40000 0>; /* Target freq:25 kHz */ ++ 
cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ fan8: pwm-fan8 { ++ compatible = "pwm-fan"; ++ pwms = <&pwm_tach 8 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ iio-hwmon { ++ compatible = "iio-hwmon"; ++ status = "okay"; ++ io-channels = <&adc0 0>, <&adc0 1>, <&adc0 2>, <&adc0 3>, ++ <&adc0 4>, <&adc0 5>, <&adc0 6>, <&adc0 7>, ++ <&adc1 0>, <&adc1 1>, <&adc1 2>, <&adc1 3>, ++ <&adc1 4>, <&adc1 5>, <&adc1 6>, <&adc1 7>; ++ }; ++}; ++ ++&pwm_tach { ++ status = "okay"; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_pwm0_default &pinctrl_pwm1_default ++ &pinctrl_pwm2_default &pinctrl_pwm3_default ++ &pinctrl_pwm4_default &pinctrl_pwm5_default ++ &pinctrl_pwm6_default &pinctrl_pwm7_default ++ &pinctrl_pwm8_default ++ &pinctrl_tach0_default &pinctrl_tach1_default ++ &pinctrl_tach2_default &pinctrl_tach3_default ++ &pinctrl_tach4_default &pinctrl_tach5_default ++ &pinctrl_tach6_default &pinctrl_tach7_default ++ &pinctrl_tach8_default &pinctrl_tach9_default ++ &pinctrl_tach10_default &pinctrl_tach11_default ++ &pinctrl_tach12_default &pinctrl_tach13_default ++ &pinctrl_tach14_default &pinctrl_tach15_default>; ++ fan-0 { ++ tach-ch = /bits/ 8 <0x0>; ++ }; ++ fan-1 { ++ tach-ch = /bits/ 8 <0x1>; ++ }; ++ fan-2 { ++ tach-ch = /bits/ 8 <0x2>; ++ }; ++ fan-3 { ++ tach-ch = /bits/ 8 <0x3>; ++ }; ++ fan-4 { ++ tach-ch = /bits/ 8 <0x4>; ++ }; ++ fan-5 { ++ tach-ch = /bits/ 8 <0x5>; ++ }; ++ fan-6 { ++ tach-ch = /bits/ 8 <0x6>; ++ }; ++ fan-7 { ++ tach-ch = /bits/ 8 <0x7>; ++ }; ++ fan-8 { ++ tach-ch = /bits/ 8 <0x8>; ++ }; ++ fan-9 { ++ tach-ch = /bits/ 8 <0x9>; ++ }; ++ fan-10 { ++ tach-ch = /bits/ 8 <0xA>; ++ }; ++ fan-11 { ++ tach-ch = /bits/ 8 <0xB>; ++ }; ++ fan-12 { ++ tach-ch = /bits/ 8 <0xC>; ++ }; ++ fan-13 { ++ tach-ch = /bits/ 8 <0xD>; ++ }; ++ fan-14 { ++ tach-ch = /bits/ 8 <0xE>; ++ }; ++ fan-15 { ++ tach-ch = /bits/ 8 <0xF>; ++ }; ++}; ++ ++&mctp0 { ++ status = "okay"; ++ memory-region = <&mctp0_reserved>; ++}; ++ ++&mctp1 { ++ status = "okay"; ++ memory-region = <&mctp1_reserved>; ++}; ++ ++&mctp2 { ++ status = "okay"; ++ memory-region = <&mctp2_reserved>; ++}; ++ ++&sgpiom0 { ++ status = "okay"; ++}; ++ ++&sgpiom1 { ++ status = "okay"; ++}; ++ ++&jtag1 { ++ status = "okay"; ++}; ++ ++&adc0 { ++ aspeed,int-vref-microvolt = <2500000>; ++ status = "okay"; ++ ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_adc0_default &pinctrl_adc1_default ++ &pinctrl_adc2_default &pinctrl_adc3_default ++ &pinctrl_adc4_default &pinctrl_adc5_default ++ &pinctrl_adc6_default &pinctrl_adc7_default>; ++}; ++ ++&adc1 { ++ aspeed,int-vref-microvolt = <2500000>; ++ status = "okay"; ++ ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_adc8_default &pinctrl_adc9_default ++ &pinctrl_adc10_default &pinctrl_adc11_default ++ &pinctrl_adc12_default &pinctrl_adc13_default ++ &pinctrl_adc14_default &pinctrl_adc15_default>; ++}; ++ ++&pinctrl1 { ++ pinctrl_i3c0_3_hv_voltage: i3chv-voltage { ++ pins = "U25"; ++ power-source = <1800>; ++ }; ++ ++ pinctrl_i3c0_driving: i3c0-driving { ++ pins = "U25", "U26"; ++ drive-strength = <2>; ++ }; ++ ++ pinctrl_i3c1_driving: i3c1-driving { ++ pins = "Y26", "AA24"; ++ drive-strength = <2>; ++ }; ++ ++ pinctrl_i3c2_driving: i3c2-driving { ++ pins = "R25", "AA26"; ++ drive-strength = <2>; ++ }; ++ ++ pinctrl_i3c3_driving: i3c3-driving { ++ pins = "R26", "Y25"; ++ drive-strength 
= <2>; ++ }; ++ ++ pinctrl_i3c12_15_hv_voltage: i3chv-voltage { ++ pins = "W25"; ++ power-source = <1800>; ++ }; ++ ++ pinctrl_i3c12_driving: i3c12-driving { ++ pins = "W25", "Y23"; ++ drive-strength = <2>; ++ }; ++ ++ pinctrl_i3c13_driving: i3c13-driving { ++ pins = "Y24", "W21"; ++ drive-strength = <2>; ++ }; ++ ++ pinctrl_i3c14_driving: i3c14-driving { ++ pins = "AA23", "AC22"; ++ drive-strength = <2>; ++ }; ++ ++ pinctrl_i3c15_driving: i3c15-driving { ++ pins = "AB22", "Y21"; ++ drive-strength = <2>; ++ }; ++ ++ pinctrl_rgmii0_driving: rgmii0-driving { ++ pins = "C20", "C19", "A8", "R14", "A7", "P14", ++ "D20", "A6", "B6", "N14", "B7", "B8"; ++ drive-strength = <1>; ++ }; ++ ++ pinctrl_rgmii1_driving: rgmii1-driving { ++ pins = "D19", "C19", "D15", "B12", "B10", "P13", ++ "C18", "C6", "C7", "D7", "N13", "C8"; ++ drive-strength = <1>; ++ }; ++}; ++ ++&gpio0 { ++ pinctrl-0 = <&pinctrl_i3c0_3_hv_voltage &pinctrl_i3c12_15_hv_voltage ++ &pinctrl_i3c0_driving &pinctrl_i3c1_driving ++ &pinctrl_i3c2_driving &pinctrl_i3c3_driving ++ &pinctrl_i3c12_driving &pinctrl_i3c13_driving ++ &pinctrl_i3c14_driving &pinctrl_i3c15_driving>; ++ pinctrl-names = "default"; ++}; ++ ++&i3c0 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x06010000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c1 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c2 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x06012000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c3 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c4 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x06014000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c5 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c6 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x06016000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c7 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c8 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x06018000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c9 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c10 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x0601A000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c11 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c12 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x0601C000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c13 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c14 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x0601E000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c15 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&uart12 { ++ status = "okay"; ++}; ++ ++&fmc { ++ status = "okay"; ++ pinctrl-0 = <&pinctrl_fwspi_quad_default>; ++ pinctrl-names = "default"; ++ ++ flash@0 { ++ status = "okay"; ++ m25p,fast-read; ++ label = "bmc"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <4>; ++ spi-rx-bus-width = <4>; ++#include "aspeed-evb-flash-layout-128.dtsi" ++ }; ++ ++ flash@1 { ++ status = "okay"; ++ m25p,fast-read; ++ label = "fmc0:1"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <4>; ++ spi-rx-bus-width = <4>; ++ }; ++ ++ flash@2 { ++ status = "disabled"; ++ m25p,fast-read; ++ label = "fmc0:2"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <4>; ++ spi-rx-bus-width = <4>; ++ }; ++}; ++ ++&spi0 { ++ status = "okay"; ++ pinctrl-0 = <&pinctrl_spi0_default &pinctrl_spi0_cs1_default>; ++ pinctrl-names 
= "default"; ++ ++ flash@0 { ++ status = "okay"; ++ m25p,fast-read; ++ label = "spi0:0"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <2>; ++ spi-rx-bus-width = <2>; ++ }; ++ ++ flash@1 { ++ status = "disabled"; ++ m25p,fast-read; ++ label = "spi0:1"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <2>; ++ spi-rx-bus-width = <2>; ++ }; ++}; ++ ++&spi1 { ++ status = "okay"; ++ pinctrl-0 = <&pinctrl_spi1_default &pinctrl_spi1_cs1_default>; ++ pinctrl-names = "default"; ++ ++ flash@0 { ++ status = "okay"; ++ m25p,fast-read; ++ label = "spi1:0"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <2>; ++ spi-rx-bus-width = <2>; ++ }; ++ ++ flash@1 { ++ status = "disabled"; ++ m25p,fast-read; ++ label = "spi1:1"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <2>; ++ spi-rx-bus-width = <2>; ++ }; ++}; ++ ++#if 1 ++&spi2 { ++ compatible = "aspeed,ast2700-spi-txrx"; ++ pinctrl-0 = <&pinctrl_spi2_default>; ++ pinctrl-names = "default"; ++ status = "okay"; ++ ++ spi-aspeed-full-duplex; ++ ++ tpm0: tpmdev@0 { ++ compatible = "tcg,tpm_tis-spi"; ++ spi-max-frequency = <25000000>; ++ reg = <0>; ++ status = "okay"; ++ }; ++}; ++#else ++&spi2 { ++ compatible = "aspeed,ast2700-spi"; ++ pinctrl-0 = <&pinctrl_spi2_default &pinctrl_spi2_cs1_default>; ++ pinctrl-names = "default"; ++ status = "okay"; ++ ++ flash@0 { ++ status = "okay"; ++ reg = < 0 >; ++ compatible = "jedec,spi-nor"; ++ m25p,fast-read; ++ label = "spi2:0"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <2>; ++ spi-rx-bus-width = <2>; ++ }; ++ ++ flash@1 { ++ status = "okay"; ++ reg = < 1 >; ++ compatible = "jedec,spi-nor"; ++ m25p,fast-read; ++ label = "spi2:1"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <2>; ++ spi-rx-bus-width = <2>; ++ }; ++}; ++#endif ++ ++&can0 { ++ status = "okay"; ++}; ++ ++&emmc_controller { ++ status = "okay"; ++ mmc-hs200-1_8v; ++}; ++ ++&emmc { ++ status = "okay"; ++#if 1 ++ bus-width = <4>; ++#else ++ bus-width = <8>; ++ pinctrl-0 = <&pinctrl_emmc_default ++ &pinctrl_emmcg8_default>; ++#endif ++ non-removable; ++ max-frequency = <200000000>; ++}; ++ ++&ufs_controller { ++ status = "okay"; ++}; ++ ++&ufs { ++ status = "okay"; ++ lanes-per-direction = <2>; ++ ref-clk-freq = <26000000>; ++}; ++ ++&chassis { ++ status = "okay"; ++}; ++ ++&mdio0 { ++ status = "okay"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ ethphy0: ethernet-phy@0 { ++ compatible = "ethernet-phy-ieee802.3-c22"; ++ reg = <0>; ++ }; ++}; ++ ++&mdio1 { ++ status = "okay"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ ethphy1: ethernet-phy@0 { ++ compatible = "ethernet-phy-ieee802.3-c22"; ++ reg = <0>; ++ /* For DDR5 board */ ++ ti,rx-internal-delay = ; ++ ti,tx-internal-delay = ; ++ }; ++}; ++ ++&mac0 { ++ status = "okay"; ++ ++ phy-mode = "rgmii-id"; ++ phy-handle = <ðphy0>; ++ ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_rgmii0_default &pinctrl_rgmii0_driving>; ++}; ++ ++&mac1 { ++ status = "okay"; ++ ++ phy-mode = "rgmii-id"; ++ phy-handle = <ðphy1>; ++ ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_rgmii1_default &pinctrl_rgmii1_driving>; ++}; ++ ++#if PCIE2_RC ++&pcie2 { ++ status = "okay"; ++}; ++#else ++&mdio2 { ++ status = "okay"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ ethphy2: ethernet-phy@0 { ++ compatible = "ethernet-phy-ieee802.3-c22"; ++ reg = <0>; ++ }; ++}; ++ ++&sgmii { ++ status = "okay"; ++}; ++ ++&mac2 { ++ status = "okay"; ++ ++ phy-mode = "sgmii"; ++ phy-handle = <ðphy2>; ++}; ++#endif ++ ++&syscon1 { ++ mac0-clk-delay = 
<0x18 0x16 ++ 0x10 0x10 ++ 0x10 0x10>; ++ mac1-clk-delay = <0x18 0x17 ++ 0x10 0x10 ++ 0x10 0x10>; ++ assigned-clocks = <&syscon1 SCU1_CLK_MACHCLK>, ++ <&syscon1 SCU1_CLK_RGMII>, ++ <&syscon1 SCU1_CLK_RMII>; ++ assigned-clock-rates = <200000000>, <125000000>, <50000000>; ++}; ++ ++&espi0 { ++ status = "okay"; ++ perif-dma-mode; ++ perif-mmbi-enable; ++ perif-mmbi-src-addr = <0x0 0xa8000000>; ++ perif-mmbi-tgt-memory = <&espi0_mmbi_memory>; ++ perif-mmbi-instance-num = <0x1>; ++ perif-mcyc-enable; ++ perif-mcyc-src-addr = <0x0 0x98000000>; ++ perif-mcyc-size = <0x0 0x10000>; ++ oob-dma-mode; ++ flash-dma-mode; ++}; ++ ++&lpc0_kcs0 { ++ status = "okay"; ++ kcs-io-addr = <0xca0>; ++ kcs-channel = <0>; ++}; ++ ++&lpc0_kcs1 { ++ status = "okay"; ++ kcs-io-addr = <0xca8>; ++ kcs-channel = <1>; ++}; ++ ++&lpc0_kcs2 { ++ status = "okay"; ++ kcs-io-addr = <0xca2>; ++ kcs-channel = <2>; ++}; ++ ++&lpc0_kcs3 { ++ status = "okay"; ++ kcs-io-addr = <0xca4>; ++ kcs-channel = <3>; ++}; ++ ++&lpc0_ibt { ++ status = "okay"; ++}; ++ ++&lpc0_mbox { ++ status = "okay"; ++}; ++ ++&lpc0_snoop { ++ status = "okay"; ++ snoop-ports = <0x80>, <0x81>; ++}; ++ ++&lpc0_uart_routing { ++ status = "okay"; ++}; ++ ++&lpc1_kcs0 { ++ status = "okay"; ++ kcs-io-addr = <0xca0>; ++ kcs-channel = <4>; ++}; ++ ++&lpc1_kcs1 { ++ status = "okay"; ++ kcs-io-addr = <0xca8>; ++ kcs-channel = <5>; ++}; ++ ++&lpc1_kcs2 { ++ status = "okay"; ++ kcs-io-addr = <0xca2>; ++ kcs-channel = <6>; ++}; ++ ++&lpc1_kcs3 { ++ status = "okay"; ++ kcs-io-addr = <0xca4>; ++ kcs-channel = <7>; ++}; ++ ++&lpc1_ibt { ++ status = "okay"; ++}; ++ ++&lpc1_mbox { ++ status = "okay"; ++}; ++ ++&lpc1_snoop { ++ status = "okay"; ++ snoop-ports = <0x80>, <0x81>; ++}; ++ ++&lpc1_uart_routing { ++ status = "okay"; ++}; ++ ++&video0 { ++ status = "okay"; ++ memory-region = <&video_engine_memory0>; ++}; ++ ++&video1 { ++ status = "okay"; ++ memory-region = <&video_engine_memory1>; ++}; ++ ++&disp_intf { ++ status = "okay"; ++}; ++ ++&rtc { ++ status = "okay"; ++}; ++ ++&rsss { ++ status = "okay"; ++}; ++ ++&ecdsa { ++ status = "okay"; ++}; ++ ++&hace { ++ status = "okay"; ++}; ++ ++#if PCIE0_EP ++&bmc_dev0 { ++ status = "okay"; ++ memory-region = <&bmc_dev0_memory>; ++}; ++ ++&xdma0 { ++ status = "okay"; ++ memory-region = <&xdma_memory0>; ++}; ++ ++&pcie_vuart0 { ++ port = <0x3f8>; ++ sirq = <4>; ++ sirq-polarity = <0>; ++ ++ status = "okay"; ++}; ++ ++&pcie_vuart1 { ++ port = <0x2f8>; ++ sirq = <3>; ++ sirq-polarity = <0>; ++ ++ status = "okay"; ++}; ++ ++&pcie_lpc0_kcs0 { ++ status = "okay"; ++ kcs-io-addr = <0x3a0>; ++ kcs-channel = <8>; ++}; ++ ++&pcie_lpc0_kcs1 { ++ status = "okay"; ++ kcs-io-addr = <0x3a8>; ++ kcs-channel = <9>; ++}; ++ ++&pcie_lpc0_kcs2 { ++ status = "okay"; ++ kcs-io-addr = <0x3a2>; ++ kcs-channel = <10>; ++}; ++ ++&pcie_lpc0_kcs3 { ++ status = "okay"; ++ kcs-io-addr = <0x3a4>; ++ kcs-channel = <11>; ++}; ++ ++&pcie_lpc0_ibt { ++ status = "okay"; ++ bt-channel = <2>; ++}; ++ ++&pcie0_mmbi0 { ++ status = "okay"; ++ memory-region = <&pcie0_mmbi0_memory>; ++ ++ bmc-int-value = /bits/ 8 <0x00>; ++ bmc-int-location = <0>; ++}; ++#else ++&pcie0 { ++ status = "okay"; ++}; ++#endif ++ ++#if PCIE1_EP ++&bmc_dev1 { ++ status = "okay"; ++ memory-region = <&bmc_dev1_memory>; ++}; ++ ++&xdma1 { ++ status = "okay"; ++ memory-region = <&xdma_memory1>; ++}; ++ ++&pcie_vuart2 { ++ port = <0x3f8>; ++ sirq = <4>; ++ sirq-polarity = <0>; ++ ++ status = "okay"; ++}; ++ ++&pcie_vuart3 { ++ port = <0x2f8>; ++ sirq = <3>; ++ sirq-polarity = <0>; ++ ++ status = 
"okay"; ++}; ++ ++&pcie_lpc1_kcs0 { ++ status = "okay"; ++ kcs-io-addr = <0x3a0>; ++ kcs-channel = <12>; ++}; ++ ++&pcie_lpc1_kcs1 { ++ status = "okay"; ++ kcs-io-addr = <0x3a8>; ++ kcs-channel = <13>; ++}; ++ ++&pcie_lpc1_kcs2 { ++ status = "okay"; ++ kcs-io-addr = <0x3a2>; ++ kcs-channel = <14>; ++}; ++ ++&pcie_lpc1_kcs3 { ++ status = "okay"; ++ kcs-io-addr = <0x3a4>; ++ kcs-channel = <15>; ++}; ++ ++&pcie_lpc1_ibt { ++ status = "okay"; ++ bt-channel = <3>; ++}; ++ ++&pcie1_mmbi4 { ++ status = "okay"; ++ memory-region = <&pcie1_mmbi4_memory>; ++ ++ bmc-int-value = /bits/ 8 <0x00>; ++ bmc-int-location = <0>; ++}; ++#else ++&pcie1 { ++ status = "okay"; ++}; ++#endif ++ ++#if 0 ++&i3c0 { ++ status = "okay"; ++}; ++ ++&jtag0 { ++ status = "okay"; ++}; ++#endif ++ ++&sdio_controller { ++ status = "okay"; ++ mmc-hs200-1_8v; ++ ++ vcc_sdhci0: regulator-vcc-sdhci0 { ++ compatible = "regulator-fixed"; ++ regulator-name = "SDHCI0 Vcc"; ++ regulator-min-microvolt = <3300000>; ++ regulator-max-microvolt = <3300000>; ++ gpios = <&gpio0 ASPEED_GPIO(G, 6) GPIO_ACTIVE_HIGH>; ++ enable-active-high; ++ }; ++ ++ vccq_sdhci0: regulator-vccq-sdhci0 { ++ compatible = "regulator-gpio"; ++ regulator-name = "SDHCI0 VccQ"; ++ regulator-min-microvolt = <1800000>; ++ regulator-max-microvolt = <3300000>; ++ gpios = <&gpio0 ASPEED_GPIO(G, 7) GPIO_ACTIVE_HIGH>; ++ gpios-states = <1>; ++ states = <3300000 1>, ++ <1800000 0>; ++ }; ++ ++}; ++ ++&sdhci { ++ status = "okay"; ++ bus-width = <4>; ++ max-frequency = <100000000>; ++ /* DDR50 bits in CAPA2 are not supported */ ++ sdhci-caps-mask = <0x6 0x0>; ++ sdhci-drive-type = /bits/ 8 <3>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_sd_default>; ++ vmmc-supply = <&vcc_sdhci0>; ++ vqmmc-supply = <&vccq_sdhci0>; ++ sd-uhs-sdr104; /* enable sdr104 to execute tuning */ ++}; ++ ++#if 1 ++&i2c0 { ++ status = "okay"; ++}; ++ ++&i2c1 { ++ status = "okay"; ++}; ++ ++&i2c2 { ++ status = "okay"; ++}; ++ ++&i2c3 { ++ status = "okay"; ++}; ++ ++&i2c4 { ++ status = "okay"; ++}; ++ ++&i2c5 { ++ status = "okay"; ++}; ++ ++&i2c6 { ++ status = "okay"; ++}; ++ ++&i2c7 { ++ status = "okay"; ++}; ++ ++&i2c8 { ++ status = "okay"; ++}; ++ ++&i2c11 { ++ status = "okay"; ++}; ++ ++&i2c12 { ++ status = "okay"; ++}; ++ ++&i2c13 { ++ status = "okay"; ++}; ++#endif ++ ++#if 0 ++&ehci0 { ++ status = "okay"; ++}; ++ ++&ehci1 { ++ status = "okay"; ++}; ++ ++&uhci0 { ++ status = "okay"; ++ memory-region = <&uhci0_reserved>; ++}; ++ ++#endif ++ ++#if 1 ++&uphy3a { ++ status = "okay"; ++}; ++ ++&uphy3b { ++ status = "okay"; ++}; ++#endif ++ ++#if 0 ++&xhci0 { ++ status = "okay"; ++}; ++ ++&xhci1 { ++ status = "okay"; ++}; ++#endif ++ ++&vhuba0 { ++ status = "okay"; ++ pinctrl-0 = <&pinctrl_usb2ahpd0_default>; ++}; ++ ++&usb3ahp { ++ status = "okay"; ++ pinctrl-0 = <&pinctrl_usb3axhp_default &pinctrl_usb2axhp_default>; ++}; ++ ++&usb3bhp { ++ status = "okay"; ++}; ++ ++&uphy2b { ++ status = "okay"; ++}; ++ ++&vhubb1 { ++ status = "okay"; ++}; ++ ++#if 0 ++&vhubc { ++ status = "okay"; ++}; ++ ++&ehci3 { ++ status = "okay"; ++}; ++#endif ++ ++&uhci1 { ++ status = "okay"; ++ memory-region = <&uhci1_reserved>; ++}; ++ ++&wdt0 { ++ status = "okay"; ++}; ++ ++&wdt1 { ++ status = "okay"; ++}; ++ ++&otp { ++ status = "okay"; ++}; +diff --git a/arch/arm64/boot/dts/aspeed/ast2700-ncsi.dts b/arch/arm64/boot/dts/aspeed/ast2700-ncsi.dts +--- a/arch/arm64/boot/dts/aspeed/ast2700-ncsi.dts 1970-01-01 00:00:00.000000000 +0000 ++++ b/arch/arm64/boot/dts/aspeed/ast2700-ncsi.dts 2025-12-23 10:16:06.862271766 +0000 
+@@ -0,0 +1,52 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++ ++/dts-v1/; ++ ++#include "ast2700-evb.dts" ++ ++/ { ++ model = "AST2700-NCSI"; ++}; ++ ++&sgpios { ++ status = "disabled"; ++}; ++ ++&mac0 { ++ status = "okay"; ++ ++ phy-mode = "rmii"; ++ use-ncsi; ++ ++ pinctrl-names = "default"; ++ /* If you want to use RMII0 RCLKO as internal clock for RMII, ++ * add &pinctrl_rmii0_rclko_default in pinctrl-0. ++ */ ++ pinctrl-0 = <&pinctrl_rmii0_default>; ++}; ++ ++&mac1 { ++ status = "okay"; ++ ++ phy-mode = "rmii"; ++ use-ncsi; ++ ++ pinctrl-names = "default"; ++ /* If you want to use RMII1 RCLKO as internal clock for RMII, ++ * add &pinctrl_rmii1_rclko_default in pinctrl-0. ++ */ ++ pinctrl-0 = <&pinctrl_rmii1_default>; ++}; ++ ++&syscon1 { ++ mac0-clk-delay = <0 0 ++ 0 0 ++ 0 0>; ++ mac1-clk-delay = <0 0 ++ 0 0 ++ 0 0>; ++ assigned-clocks = <&syscon1 SCU1_CLK_MACHCLK>, ++ <&syscon1 SCU1_CLK_RGMII>, ++ <&syscon1 SCU1_CLK_RMII>; ++ assigned-clock-rates = <200000000>, <125000000>, <50000000>; ++}; +diff --git a/arch/arm64/boot/dts/aspeed/ast2700-raw.dts b/arch/arm64/boot/dts/aspeed/ast2700-raw.dts +--- a/arch/arm64/boot/dts/aspeed/ast2700-raw.dts 1970-01-01 00:00:00.000000000 +0000 ++++ b/arch/arm64/boot/dts/aspeed/ast2700-raw.dts 2025-12-23 10:16:06.862271766 +0000 +@@ -0,0 +1,220 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++ ++/dts-v1/; ++ ++#include "ast2700-evb.dts" ++ ++&fmc { ++ status = "okay"; ++ pinctrl-0 = <&pinctrl_fwspi_quad_default>; ++ pinctrl-names = "default"; ++ ++ flash@0 { ++ status = "okay"; ++ m25p,fast-read; ++ label = "bmc"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <2>; ++ spi-rx-bus-width = <2>; ++#include "aspeed-evb-flash-layout-128.dtsi" ++ }; ++ ++ flash@1 { ++ status = "okay"; ++ m25p,fast-read; ++ label = "fmc0:1"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <2>; ++ spi-rx-bus-width = <2>; ++ }; ++ ++ flash@2 { ++ status = "disabled"; ++ m25p,fast-read; ++ label = "fmc0:2"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <2>; ++ spi-rx-bus-width = <2>; ++ }; ++}; ++ ++&spi0 { ++ status = "okay"; ++ pinctrl-0 = <&pinctrl_spi0_default &pinctrl_spi0_cs1_default>; ++ pinctrl-names = "default"; ++ ++ flash@0 { ++ status = "okay"; ++ m25p,fast-read; ++ label = "spi0:0"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <2>; ++ spi-rx-bus-width = <2>; ++ }; ++ ++ flash@1 { ++ status = "disabled"; ++ m25p,fast-read; ++ label = "spi0:1"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <2>; ++ spi-rx-bus-width = <2>; ++ }; ++}; ++ ++&spi1 { ++ status = "okay"; ++ pinctrl-0 = <&pinctrl_spi1_default &pinctrl_spi1_cs1_default>; ++ pinctrl-names = "default"; ++ ++ flash@0 { ++ status = "okay"; ++ m25p,fast-read; ++ label = "spi1:0"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <2>; ++ spi-rx-bus-width = <2>; ++ }; ++ ++ flash@1 { ++ status = "disabled"; ++ m25p,fast-read; ++ label = "spi1:1"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <2>; ++ spi-rx-bus-width = <2>; ++ }; ++}; ++ ++&spi2 { ++ status = "disabled"; ++}; ++ ++&emmc_controller { ++ status = "disabled"; ++}; ++ ++&emmc { ++ status = "disabled"; ++}; ++ ++&ufs_controller { ++ status = "disabled"; ++}; ++ ++&ufs { ++ status = "disabled"; ++}; ++ ++&video0 { ++ status = "disabled"; ++}; ++ ++&video1 { ++ status = "disabled"; ++}; ++ ++&disp_intf { ++ status = "disabled"; ++}; ++ ++#if PCIE0_EP ++&bmc_dev0 { ++ status = "disabled"; ++}; ++ ++&xdma0 { ++ status = "disabled"; ++}; ++ ++&pcie_vuart0 { ++ status = "disabled"; 
++}; ++ ++&pcie_vuart1 { ++ status = "disabled"; ++}; ++ ++&pcie_lpc0_kcs0 { ++ status = "disabled"; ++}; ++ ++&pcie_lpc0_kcs1 { ++ status = "disabled"; ++}; ++ ++&pcie_lpc0_kcs2 { ++ status = "disabled"; ++}; ++ ++&pcie_lpc0_kcs3 { ++ status = "disabled"; ++}; ++ ++&pcie_lpc0_ibt { ++ status = "disabled"; ++}; ++ ++&pcie0_mmbi0 { ++ status = "disabled"; ++}; ++#else ++&pcie0 { ++ status = "disabled"; ++}; ++#endif ++ ++#if PCIE1_EP ++&bmc_dev1 { ++ status = "disabled"; ++}; ++ ++&xdma1 { ++ status = "disabled"; ++}; ++ ++&pcie_vuart2 { ++ status = "disabled"; ++}; ++ ++&pcie_vuart3 { ++ status = "disabled"; ++}; ++ ++&pcie_lpc1_kcs0 { ++ status = "disabled"; ++}; ++ ++&pcie_lpc1_kcs1 { ++ status = "disabled"; ++}; ++ ++&pcie_lpc1_kcs2 { ++ status = "disabled"; ++}; ++ ++&pcie_lpc1_kcs3 { ++ status = "disabled"; ++}; ++ ++&pcie_lpc1_ibt { ++ status = "disabled"; ++}; ++ ++&pcie1_mmbi4 { ++ status = "disabled"; ++}; ++#else ++&pcie1 { ++ status = "disabled"; ++}; ++#endif ++ ++&sdio_controller { ++ status = "disabled"; ++}; ++ ++&sdhci { ++ status = "disabled"; ++}; ++ +diff --git a/arch/arm64/boot/dts/aspeed/ast2700-reserved-mem.dtsi b/arch/arm64/boot/dts/aspeed/ast2700-reserved-mem.dtsi +--- a/arch/arm64/boot/dts/aspeed/ast2700-reserved-mem.dtsi 1970-01-01 00:00:00.000000000 +0000 ++++ b/arch/arm64/boot/dts/aspeed/ast2700-reserved-mem.dtsi 2025-12-23 10:16:06.862271766 +0000 +@@ -0,0 +1,126 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++ ++/* AST2700 reserved memories with no-map property */ ++ ++#if DUAL_NODE ++espi1_mmbi_memory: espi1-mmbi-memory@418000000 { ++ reg = <0x4 0x18000000 0x0 0x4000000>; ++ no-map; ++}; ++ ++edaf1: edaf1-memory@41c000000 { ++ reg = <0x4 0x1c000000 0x0 0x4000000>; ++ no-map; ++}; ++#endif ++ ++edaf0: eadf0-memory@428000000 { ++ reg = <0x4 0x28000000 0x0 0x4000000>; ++ no-map; ++}; ++ ++espi0_mmbi_memory: espi0-mmbi-memory@424000000 { ++ reg = <0x4 0x24000000 0x0 0x4000000>; ++ no-map; ++}; ++ ++#if PCIE0_EP ++bmc_dev0_memory: bmc-dev0-memory@423800000 { ++ reg = <0x4 0x23800000 0x0 0x100000>; ++ no-map; ++}; ++ ++xdma_memory0: xdma0 { ++ size = <0x0 0x00100000>; ++ compatible = "shared-dma-pool"; ++ no-map; ++}; ++ ++pcie0_mmbi0_memory: pcie0-mmbi0-memory@423a00000 { ++ reg = <0x4 0x23a00000 0x0 0x200000>; ++ no-map; ++}; ++#endif ++ ++#if PCIE1_EP ++bmc_dev1_memory: bmc-dev1-memory@423900000 { ++ reg = <0x4 0x23900000 0x0 0x100000>; ++ no-map; ++}; ++ ++xdma_memory1: xdma1 { ++ size = <0x0 0x00100000>; ++ compatible = "shared-dma-pool"; ++ no-map; ++}; ++ ++pcie1_mmbi4_memory: pcie1-mmbi4-memory@423c00000 { ++ reg = <0x4 0x23c00000 0x0 0x200000>; ++ no-map; ++}; ++#endif ++ ++pcie2_mmbi0_memory: pcie2-mmbi0-memory@423e00000 { ++ reg = <0x4 0x23e00000 0x0 0x200000>; ++ no-map; ++}; ++ ++ssp_memory: ssp-memory@42c000000 { ++ reg = <0x4 0x2c000000 0x0 0x2000000>; ++ no-map; ++}; ++ ++tsp_memory: tsp-memory@42e000000 { ++ reg = <0x4 0x2e000000 0x0 0x2000000>; ++ no-map; ++}; ++ ++ipc_ssp_share: ipc-ssp-share@431080000 { ++ reg = <0x4 0x31080000 0x0 0x800000>; ++ no-map; ++}; ++ ++ipc_bootmcu_share: ipc-bootmcu-share@431880000 { ++ reg = <0x4 0x31880000 0x0 0x2b0000>; ++ no-map; ++}; ++ ++uhci0_reserved: uhci0-reserved@431b30000 { ++ reg = <0x4 0x31b30000 0x0 0x40000>; ++ compatible = "shared-dma-pool"; ++ no-map; ++}; ++ ++uhci1_reserved: uhci1-reserved@431b70000 { ++ reg = <0x4 0x31b70000 0x0 0x40000>; ++ compatible = "shared-dma-pool"; ++ no-map; ++}; ++ ++vbios_base0: vbios-base0@431bb0000 { ++ reg = <0x4 0x31bb0000 0x0 0x10000>; ++ no-map; ++}; ++ 
++vbios_base1: vbios-base1@431bc0000 { ++ reg = <0x4 0x31bc0000 0x0 0x10000>; ++ no-map; ++}; ++ ++mctp0_reserved: mctp0-reserved@431bd0000 { ++ reg = <0x4 0x31bd0000 0x0 0x10000>; ++ compatible = "shared-dma-pool"; ++ no-map; ++}; ++ ++mctp1_reserved: mctp1-reserved@431be0000 { ++ reg = <0x4 0x31be0000 0x0 0x10000>; ++ compatible = "shared-dma-pool"; ++ no-map; ++}; ++ ++mctp2_reserved: mctp2-reserved@431bf0000 { ++ reg = <0x4 0x31bf0000 0x0 0x10000>; ++ compatible = "shared-dma-pool"; ++ no-map; ++}; +diff --git a/arch/arm64/boot/dts/aspeed/ast2700-slt.dts b/arch/arm64/boot/dts/aspeed/ast2700-slt.dts +--- a/arch/arm64/boot/dts/aspeed/ast2700-slt.dts 1970-01-01 00:00:00.000000000 +0000 ++++ b/arch/arm64/boot/dts/aspeed/ast2700-slt.dts 2025-12-23 10:16:06.862271766 +0000 +@@ -0,0 +1,857 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++ ++/dts-v1/; ++ ++#include "aspeed-g7.dtsi" ++#include ++ ++#define PCIE0_EP 1 // 1: EP, 0: RC ++#define PCIE1_EP 0 // 1: EP, 0: RC ++ ++/ { ++ model = "AST2700-SLT"; ++ compatible = "aspeed,ast2700-slt", "aspeed,ast2700"; ++ ++ chosen { ++ stdout-path = "serial12:115200n8"; ++ }; ++ ++ memory@400000000 { ++ device_type = "memory"; ++ reg = <0x4 0x00000000 0x0 0x40000000>; ++ }; ++ ++ reserved-memory { ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ranges; ++ ++ #include "ast2700-reserved-mem.dtsi" ++ ++ video_engine_memory0: video0 { ++ size = <0x0 0x04000000>; ++ alignment = <0x0 0x00010000>; ++ compatible = "shared-dma-pool"; ++ reusable; ++ }; ++ ++ video_engine_memory1: video1{ ++ size = <0x0 0x04000000>; ++ alignment = <0x0 0x00010000>; ++ compatible = "shared-dma-pool"; ++ reusable; ++ }; ++ ++ gfx_memory: framebuffer { ++ size = <0x0 0x01000000>; ++ alignment = <0x0 0x01000000>; ++ compatible = "shared-dma-pool"; ++ reusable; ++ }; ++ }; ++ ++ fan3: pwm-fan3 { ++ compatible = "pwm-fan"; ++ pwms = <&pwm_tach 3 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ fan4: pwm-fan4 { ++ compatible = "pwm-fan"; ++ pwms = <&pwm_tach 4 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ fan5: pwm-fan5 { ++ compatible = "pwm-fan"; ++ pwms = <&pwm_tach 5 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ fan6: pwm-fan6 { ++ compatible = "pwm-fan"; ++ pwms = <&pwm_tach 6 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ fan7: pwm-fan7 { ++ compatible = "pwm-fan"; ++ pwms = <&pwm_tach 7 40000 0>; /* Target freq:25 kHz */ ++ cooling-min-state = <0>; ++ cooling-max-state = <3>; ++ #cooling-cells = <2>; ++ cooling-levels = <0 15 128 255>; ++ }; ++ ++ iio-hwmon { ++ compatible = "iio-hwmon"; ++ status = "okay"; ++ io-channels = <&adc0 0>, <&adc0 1>, <&adc0 2>, <&adc0 3>, ++ <&adc0 4>, <&adc0 5>, <&adc0 6>, <&adc0 7>, ++ <&adc1 0>, <&adc1 1>, <&adc1 2>, <&adc1 3>, ++ <&adc1 4>, <&adc1 5>, <&adc1 6>, <&adc1 7>; ++ }; ++}; ++ ++&pwm_tach { ++ status = "okay"; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_pwm3_default ++ &pinctrl_pwm4_default &pinctrl_pwm5_default ++ &pinctrl_pwm6_default &pinctrl_pwm7_default ++ &pinctrl_tach0_default &pinctrl_tach1_default ++ &pinctrl_tach2_default &pinctrl_tach3_default ++ 
&pinctrl_tach4_default &pinctrl_tach5_default ++ &pinctrl_tach6_default &pinctrl_tach7_default ++ &pinctrl_tach8_default &pinctrl_tach9_default ++ &pinctrl_tach10_default &pinctrl_tach11_default ++ &pinctrl_tach12_default &pinctrl_tach13_default ++ &pinctrl_tach14_default &pinctrl_tach15_default>; ++ fan-0 { ++ tach-ch = /bits/ 8 <0x0>; ++ }; ++ fan-1 { ++ tach-ch = /bits/ 8 <0x1>; ++ }; ++ fan-2 { ++ tach-ch = /bits/ 8 <0x2>; ++ }; ++ fan-3 { ++ tach-ch = /bits/ 8 <0x3>; ++ }; ++ fan-4 { ++ tach-ch = /bits/ 8 <0x4>; ++ }; ++ fan-5 { ++ tach-ch = /bits/ 8 <0x5>; ++ }; ++ fan-6 { ++ tach-ch = /bits/ 8 <0x6>; ++ }; ++ fan-7 { ++ tach-ch = /bits/ 8 <0x7>; ++ }; ++ fan-8 { ++ tach-ch = /bits/ 8 <0x8>; ++ }; ++ fan-9 { ++ tach-ch = /bits/ 8 <0x9>; ++ }; ++ fan-10 { ++ tach-ch = /bits/ 8 <0xA>; ++ }; ++ fan-11 { ++ tach-ch = /bits/ 8 <0xB>; ++ }; ++ fan-12 { ++ tach-ch = /bits/ 8 <0xC>; ++ }; ++ fan-13 { ++ tach-ch = /bits/ 8 <0xD>; ++ }; ++ fan-14 { ++ tach-ch = /bits/ 8 <0xE>; ++ }; ++ fan-15 { ++ tach-ch = /bits/ 8 <0xF>; ++ }; ++}; ++ ++&peci0 { ++ status = "okay"; ++}; ++ ++&mctp0 { ++ status = "okay"; ++ memory-region = <&mctp0_reserved>; ++}; ++ ++&mctp1 { ++ status = "okay"; ++ memory-region = <&mctp1_reserved>; ++}; ++ ++&mctp2 { ++ status = "okay"; ++ memory-region = <&mctp2_reserved>; ++}; ++ ++&adc0 { ++ aspeed,int-vref-microvolt = <2500000>; ++ status = "okay"; ++ ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_adc0_default &pinctrl_adc1_default ++ &pinctrl_adc2_default &pinctrl_adc3_default ++ &pinctrl_adc4_default &pinctrl_adc5_default ++ &pinctrl_adc6_default &pinctrl_adc7_default>; ++}; ++ ++&adc1 { ++ aspeed,int-vref-microvolt = <2500000>; ++ status = "okay"; ++ ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_adc8_default &pinctrl_adc9_default ++ &pinctrl_adc10_default &pinctrl_adc11_default ++ &pinctrl_adc12_default &pinctrl_adc13_default ++ &pinctrl_adc14_default &pinctrl_adc15_default>; ++}; ++ ++&pinctrl1 { ++ pinctrl_rgmii0_driving: rgmii0-driving { ++ pins = "C20", "C19", "A8", "R14", "A7", "P14", ++ "D20", "A6", "B6", "N14", "B7", "B8"; ++ drive-strength = <1>; ++ }; ++}; ++ ++&i3c0 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x06010000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c1 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c2 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x06012000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c3 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c4 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x06014000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c5 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c6 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x06016000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c7 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c8 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x06018000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c9 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c10 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x0601A000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c11 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c12 { ++ initial-role = "target"; ++ pid = <0x000007ec 0x0601C000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c13 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&i3c14 { ++ initial-role = "target"; ++ pid = 
<0x000007ec 0x0601E000>; ++ dcr = /bits/ 8 <0xcc>; ++ status = "okay"; ++}; ++ ++&i3c15 { ++ initial-role = "primary"; ++ status = "okay"; ++}; ++ ++&uart0 { ++ status = "okay"; ++}; ++ ++&uart1 { ++ status = "okay"; ++}; ++ ++&uart2 { ++ status = "okay"; ++}; ++ ++&uart3 { ++ status = "okay"; ++}; ++ ++&uart4 { ++ status = "okay"; ++}; ++ ++&uart5 { ++ status = "okay"; ++}; ++ ++&uart6 { ++ status = "okay"; ++}; ++ ++&uart7 { ++ status = "okay"; ++}; ++ ++&uart12 { ++ status = "okay"; ++}; ++ ++&fmc { ++ status = "okay"; ++ pinctrl-0 = <&pinctrl_fwspi_quad_default>; ++ pinctrl-names = "default"; ++ ++ flash@0 { ++ status = "okay"; ++ m25p,fast-read; ++ label = "bmc"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <4>; ++ spi-rx-bus-width = <4>; ++#include "aspeed-evb-flash-layout-128.dtsi" ++ }; ++ ++ flash@1 { ++ status = "okay"; ++ m25p,fast-read; ++ label = "fmc0:1"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <4>; ++ spi-rx-bus-width = <4>; ++ }; ++ ++ flash@2 { ++ status = "okay"; ++ m25p,fast-read; ++ label = "fmc0:2"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <4>; ++ spi-rx-bus-width = <4>; ++ }; ++}; ++ ++&spi0 { ++ status = "okay"; ++ pinctrl-0 = <&pinctrl_spi0_default &pinctrl_spi0_cs1_default &pinctrl_spi0_quad_default>; ++ pinctrl-names = "default"; ++ ++ flash@0 { ++ status = "okay"; ++ m25p,fast-read; ++ label = "spi0:0"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <4>; ++ spi-rx-bus-width = <4>; ++ }; ++ ++ flash@1 { ++ status = "okay"; ++ m25p,fast-read; ++ label = "spi0:1"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <4>; ++ spi-rx-bus-width = <4>; ++ }; ++}; ++ ++&spi1 { ++ status = "okay"; ++ pinctrl-0 = <&pinctrl_spi1_default &pinctrl_spi1_cs1_default &pinctrl_spi1_quad_default>; ++ pinctrl-names = "default"; ++ ++ flash@0 { ++ status = "okay"; ++ m25p,fast-read; ++ label = "spi1:0"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <4>; ++ spi-rx-bus-width = <4>; ++ }; ++ ++ flash@1 { ++ status = "okay"; ++ m25p,fast-read; ++ label = "spi1:1"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <4>; ++ spi-rx-bus-width = <4>; ++ }; ++}; ++ ++&spi2 { ++ compatible = "aspeed,ast2700-spi"; ++ pinctrl-0 = <&pinctrl_spi2_default &pinctrl_spi2_cs1_default &pinctrl_spi2_quad_default>; ++ pinctrl-names = "default"; ++ status = "okay"; ++ ++ flash@0 { ++ status = "okay"; ++ reg = < 0 >; ++ compatible = "jedec,spi-nor"; ++ m25p,fast-read; ++ label = "spi2:0"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <4>; ++ spi-rx-bus-width = <4>; ++ }; ++ ++ flash@1 { ++ status = "okay"; ++ reg = < 1 >; ++ compatible = "jedec,spi-nor"; ++ m25p,fast-read; ++ label = "spi2:1"; ++ spi-max-frequency = <50000000>; ++ spi-tx-bus-width = <4>; ++ spi-rx-bus-width = <4>; ++ }; ++}; ++ ++&can0 { ++ /delete-property/ pinctrl-0; ++ /delete-property/ pinctrl-names; ++ ++ can-internal-loopback; ++ status = "okay"; ++}; ++ ++&emmc_controller { ++ status = "okay"; ++ mmc-hs200-1_8v; ++}; ++ ++&emmc { ++ status = "okay"; ++#if 1 ++ bus-width = <4>; ++#else ++ bus-width = <8>; ++ pinctrl-0 = <&pinctrl_emmc_default ++ &pinctrl_emmcg8_default>; ++#endif ++ non-removable; ++ max-frequency = <200000000>; ++}; ++ ++&ufs_controller { ++ status = "okay"; ++}; ++ ++&ufs { ++ status = "okay"; ++ lanes-per-direction = <2>; ++ ref-clk-freq = <26000000>; ++}; ++ ++&chassis { ++ status = "okay"; ++}; ++ ++&mdio0 { ++ status = "okay"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ ethphy0: ethernet-phy@0 { ++ compatible = 
"ethernet-phy-ieee802.3-c22"; ++ reg = <0>; ++ }; ++}; ++ ++&mdio1 { ++ status = "okay"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ ethphy1: ethernet-phy@0 { ++ compatible = "ethernet-phy-ieee802.3-c22"; ++ reg = <0>; ++ }; ++}; ++ ++&mdio2 { ++ status = "okay"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ ethphy2: ethernet-phy@0 { ++ compatible = "ethernet-phy-ieee802.3-c22"; ++ reg = <0>; ++ }; ++}; ++ ++&mac0 { ++ status = "okay"; ++ ++ phy-mode = "rgmii-id"; ++ phy-handle = <ðphy0>; ++ ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_rgmii0_default &pinctrl_rgmii0_driving>; ++}; ++ ++&mac1 { ++ status = "okay"; ++ ++ phy-mode = "rgmii-id"; ++ phy-handle = <ðphy1>; ++ ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_rgmii1_default>; ++}; ++ ++&sgmii { ++ status = "okay"; ++}; ++ ++&mac2 { ++ status = "okay"; ++ ++ phy-mode = "sgmii"; ++ phy-handle = <ðphy2>; ++}; ++ ++&syscon1 { ++ mac0-clk-delay = <0x1a 0x15 ++ 0x10 0x10 ++ 0x10 0x10>; ++ mac1-clk-delay = <0x19 0x17 ++ 0x10 0x10 ++ 0x10 0x10>; ++ assigned-clocks = <&syscon1 SCU1_CLK_MACHCLK>, ++ <&syscon1 SCU1_CLK_RGMII>, ++ <&syscon1 SCU1_CLK_RMII>; ++ assigned-clock-rates = <200000000>, <125000000>, <50000000>; ++}; ++ ++&lpc0_kcs0 { ++ status = "okay"; ++ kcs-io-addr = <0xca0>; ++ kcs-channel = <0>; ++}; ++ ++&lpc0_kcs1 { ++ status = "okay"; ++ kcs-io-addr = <0xca8>; ++ kcs-channel = <1>; ++}; ++ ++&lpc0_kcs2 { ++ status = "okay"; ++ kcs-io-addr = <0xca2>; ++ kcs-channel = <2>; ++}; ++ ++&lpc0_kcs3 { ++ status = "okay"; ++ kcs-io-addr = <0xca4>; ++ kcs-channel = <3>; ++}; ++ ++&lpc0_ibt { ++ status = "okay"; ++}; ++ ++&lpc0_mbox { ++ status = "okay"; ++}; ++ ++&lpc0_snoop { ++ status = "okay"; ++ snoop-ports = <0x80>, <0x81>; ++}; ++ ++&lpc0_uart_routing { ++ status = "okay"; ++}; ++ ++&video0 { ++ status = "okay"; ++ memory-region = <&video_engine_memory0>; ++}; ++ ++&video1 { ++ status = "okay"; ++ memory-region = <&video_engine_memory1>; ++}; ++ ++&rtc { ++ status = "okay"; ++}; ++ ++&rsss { ++ status = "okay"; ++}; ++ ++&ecdsa { ++ status = "okay"; ++}; ++ ++&hace { ++ status = "okay"; ++}; ++ ++#if PCIE0_EP ++&bmc_dev0 { ++ status = "okay"; ++ memory-region = <&bmc_dev0_memory>; ++}; ++ ++&xdma0 { ++ status = "okay"; ++ memory-region = <&xdma_memory0>; ++}; ++ ++&pcie_vuart0 { ++ port = <0x3f8>; ++ sirq = <4>; ++ sirq-polarity = <0>; ++ ++ status = "okay"; ++}; ++ ++&pcie_vuart1 { ++ port = <0x2f8>; ++ sirq = <3>; ++ sirq-polarity = <0>; ++ ++ status = "okay"; ++}; ++ ++&pcie_lpc0_kcs0 { ++ status = "okay"; ++ kcs-io-addr = <0x3a0>; ++ kcs-channel = <8>; ++}; ++ ++&pcie_lpc0_kcs1 { ++ status = "okay"; ++ kcs-io-addr = <0x3a8>; ++ kcs-channel = <9>; ++}; ++ ++&pcie_lpc0_kcs2 { ++ status = "okay"; ++ kcs-io-addr = <0x3a2>; ++ kcs-channel = <10>; ++}; ++ ++&pcie_lpc0_kcs3 { ++ status = "okay"; ++ kcs-io-addr = <0x3a4>; ++ kcs-channel = <11>; ++}; ++ ++&pcie_lpc0_ibt { ++ status = "okay"; ++ bt-channel = <2>; ++}; ++ ++&pcie0_mmbi0 { ++ status = "okay"; ++ memory-region = <&pcie0_mmbi0_memory>; ++ ++ mmbi-bmc-int-value = /bits/ 8 <0x00>; ++ mmbi-bmc-int-offset = <0x100000>; ++}; ++#endif ++ ++&pcie1 { ++ status = "okay"; ++}; ++ ++&i2c0 { ++ status = "okay"; ++}; ++ ++&i2c1 { ++ status = "okay"; ++}; ++ ++&i2c2 { ++ status = "okay"; ++}; ++ ++&i2c3 { ++ status = "okay"; ++}; ++ ++&i2c4 { ++ status = "okay"; ++}; ++ ++&i2c5 { ++ status = "okay"; ++}; ++ ++&i2c6 { ++ status = "okay"; ++}; ++ ++&i2c7 { ++ status = "okay"; ++}; ++ ++&i2c8 { ++ status = "okay"; ++}; ++ ++&i2c9 { ++ status = "okay"; ++}; ++ 
++&i2c10 { ++ status = "okay"; ++}; ++ ++&i2c11 { ++ status = "okay"; ++}; ++ ++#if 1 ++ ++&ehci0 { ++ status = "okay"; ++}; ++ ++&ehci1 { ++ status = "okay"; ++}; ++ ++&uhci0 { ++ status = "okay"; ++}; ++#endif ++ ++#if 0 ++&uphy3a { ++ status = "okay"; ++}; ++ ++&uphy3b { ++ status = "okay"; ++}; ++#endif ++ ++#if 0 ++&xhci0 { ++ status = "okay"; ++}; ++ ++&xhci1 { ++ status = "okay"; ++}; ++#endif ++ ++#if 0 ++&vhuba1 { ++ status = "okay"; ++}; ++ ++&vhubb1 { ++ status = "okay"; ++}; ++ ++#endif ++ ++#if 0 ++&vhuba0 { ++ status = "okay"; ++}; ++ ++&vhubb0 { ++ status = "okay"; ++}; ++#endif ++ ++#if 0 ++&usb3ahp { ++ status = "okay"; ++}; ++ ++&usb3bhp { ++ status = "okay"; ++}; ++ ++&uphy2a { ++ status = "okay"; ++}; ++ ++&uphy2b { ++ status = "okay"; ++}; ++#endif ++ ++&vhubc { ++ status = "okay"; ++}; ++ ++&ehci3 { ++ status = "okay"; ++}; ++ ++&wdt0 { ++ status = "okay"; ++}; ++ ++&wdt1 { ++ status = "okay"; ++}; ++ ++&otp { ++ status = "okay"; ++}; +diff --git a/drivers/Makefile b/drivers/Makefile +--- a/drivers/Makefile 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/Makefile 2025-12-23 10:16:13.352162932 +0000 +@@ -195,3 +195,4 @@ + obj-$(CONFIG_DPLL) += dpll/ + + obj-$(CONFIG_S390) += s390/ ++obj-$(CONFIG_JTAG_ASPEED) += jtag/ +diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig +--- a/drivers/bus/Kconfig 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/bus/Kconfig 2025-12-23 10:16:08.870238093 +0000 +@@ -261,6 +261,12 @@ + configuration. Allows to adjust the priorities of all master + peripherals. + ++config ASPEED_LTPI ++ bool "Aspeed LTPI bus controller driver" ++ depends on ARCH_ASPEED ++ help ++ LVDS Tunneling Protocol and Interface (LTPI) bus controller ++ + source "drivers/bus/fsl-mc/Kconfig" + source "drivers/bus/mhi/Kconfig" + +diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile +--- a/drivers/bus/Makefile 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/bus/Makefile 2025-12-23 10:16:12.865171099 +0000 +@@ -39,6 +39,7 @@ + obj-$(CONFIG_VEXPRESS_CONFIG) += vexpress-config.o + + obj-$(CONFIG_DA8XX_MSTPRI) += da8xx-mstpri.o ++obj-$(CONFIG_ASPEED_LTPI) += aspeed-ltpi.o + + # MHI + obj-y += mhi/ +diff --git a/drivers/bus/aspeed-ltpi.c b/drivers/bus/aspeed-ltpi.c +--- a/drivers/bus/aspeed-ltpi.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/bus/aspeed-ltpi.c 2025-12-23 10:16:20.967035301 +0000 +@@ -0,0 +1,272 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++// Copyright ASPEED Technology ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define LTPI_AUTO_CAP_LOW 0x24 ++#define LTPI_I2C_IO_FRAME_EN GENMASK(29, 24) ++#define LTPI_AUTO_CAP_HIGH 0x28 ++#define LTPI_UART_IO_FRAME_EN GENMASK(14, 13) ++ ++#define LTPI_LINK_CONTROLL 0x80 ++#define LTPI_AUTO_CONFIG BIT(10) ++ ++#define LTPI_INTR_STATUS 0x100 ++#define LTPI_INTR_EN 0x104 ++#define LTPI_INTR_EN_OP_LINK_LOST BIT(4) ++#define LTPI_LINK_MANAGE_ST 0x108 ++#define LTPI_LINK_PARTNER_FLAG BIT(24) ++ ++#define LTPI_MANUAL_CAP_LOW 0x118 ++#define LTPI_MANUAL_CAP_HIGH 0x11c ++ ++#define LTPI_I2C_TIMING_0 0x134 ++#define LTPI_I2C_TIMING_1 0x138 ++ ++#define LTPI_I2C_100K_0 0x3535352f ++#define LTPI_I2C_100K_1 0x09353535 ++ ++#define LTPI_I2C_400K_0 0x06060d06 ++#define LTPI_I2C_400K_1 0x090d0a06 ++ ++#define SCU_IO_PINS_TRAP1 0x10 ++#define SCU_IO_PINS_TRAP1_CLEAR 0x14 ++#define SCU_IO_PINS_TRAP_LTPI GENMASK(2, 0) ++#define SCU_IO_OTP_TRAP1 0xa00 ++#define SCU_IO_OTP_TRAP1_CLEAR 0xa04 ++#define 
SCU_IO_OTP_TRAP2 0xa20 ++#define SCU_IO_OTP_TRAP2_CLEAR 0xa24 ++ ++#define MAX_I2C_IN_LTPI 6 ++#define MAX_UART_IN_LTPI 2 ++ ++enum chip_version { ++ AST2700, ++ AST1700, ++}; ++ ++struct aspeed_ltpi_priv { ++ struct device *dev; ++ void __iomem *regs; ++ struct clk *ltpi_clk; ++ struct clk *ltpi_phyclk; ++ struct reset_control *ltpi_rst; ++ struct regmap *scu; ++ u32 version; ++ u32 i2c_tunneling; ++ u32 i2c_timing_0; ++ u32 i2c_timing_1; ++ u32 uart_tunneling; ++}; ++ ++static irqreturn_t aspeed_ltpi_irq_handler(int irq, void *dev_id) ++{ ++ struct aspeed_ltpi_priv *priv = dev_id; ++ u32 status = readl(priv->regs + LTPI_INTR_STATUS); ++ ++ if (status & LTPI_INTR_EN_OP_LINK_LOST) { ++ writel(0, priv->regs + LTPI_INTR_EN); ++ writel(status, priv->regs + LTPI_INTR_STATUS); ++ panic("LTPI link lost!\n"); ++ /* Will not return */ ++ } ++ ++ writel(status, priv->regs + LTPI_INTR_STATUS); ++ ++ return IRQ_HANDLED; ++} ++ ++static int aspeed_ltpi_init_mux(struct aspeed_ltpi_priv *priv) ++{ ++ u32 reg, i2c_en, uart_en, i; ++ ++ reg = readl(priv->regs + LTPI_AUTO_CAP_LOW); ++ ++ i2c_en = FIELD_GET(LTPI_I2C_IO_FRAME_EN, reg); ++ i2c_en &= priv->i2c_tunneling; ++ ++ reg &= ~LTPI_I2C_IO_FRAME_EN; ++ reg |= FIELD_PREP(LTPI_I2C_IO_FRAME_EN, i2c_en); ++ writel(reg, priv->regs + LTPI_MANUAL_CAP_LOW); ++ ++ reg = readl(priv->regs + LTPI_AUTO_CAP_HIGH); ++ ++ uart_en = FIELD_GET(LTPI_UART_IO_FRAME_EN, reg); ++ uart_en &= priv->uart_tunneling; ++ ++ reg &= ~LTPI_UART_IO_FRAME_EN; ++ reg |= FIELD_PREP(LTPI_UART_IO_FRAME_EN, uart_en); ++ ++ writel(reg, priv->regs + LTPI_MANUAL_CAP_HIGH); ++ ++ /* Apply LTPI manual configuration */ ++ reg = readl(priv->regs + LTPI_LINK_CONTROLL); ++ reg &= ~LTPI_AUTO_CONFIG; ++ writel(reg, priv->regs + LTPI_LINK_CONTROLL); ++ ++ /* Set the AST1700 i2c ac-timing */ ++ if (priv->version == AST1700) { ++ /* Apply i2c timing with i2c tunneling setting */ ++ for (i = 0; i < MAX_I2C_IN_LTPI; i++) { ++ if ((priv->i2c_tunneling >> i) & 0x1) { ++ writel(priv->i2c_timing_0, ++ priv->regs + LTPI_I2C_TIMING_0 + (0x8 * i)); ++ writel(priv->i2c_timing_1, ++ priv->regs + LTPI_I2C_TIMING_1 + (0x8 * i)); ++ } ++ } ++ } ++ ++ return 0; ++} ++ ++static int aspeed_ltpi_probe(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ const struct of_dev_auxdata *lookup = dev_get_platdata(dev); ++ struct device_node *np = dev->of_node; ++ const struct of_device_id *match; ++ struct aspeed_ltpi_priv *priv; ++ int irq, ret; ++ ++ match = of_match_device(dev->driver->of_match_table, dev); ++ ++ if (match) { ++ if (of_property_match_string(np, "compatible", match->compatible) < 0) ++ return -ENODEV; ++ } else { ++ return -ENODEV; ++ } ++ ++ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); ++ if (!priv) ++ return -ENOMEM; ++ ++ priv->dev = dev; ++ priv->regs = devm_platform_ioremap_resource(pdev, 0); ++ if (IS_ERR(priv->regs)) ++ return PTR_ERR(priv->regs); ++ ++ priv->version = (enum chip_version)device_get_match_data(dev); ++ ++ priv->ltpi_clk = devm_clk_get(&pdev->dev, "ltpi"); ++ if (IS_ERR(priv->ltpi_clk)) { ++ priv->ltpi_clk = devm_clk_get(&pdev->dev, "ahb"); ++ if (IS_ERR(priv->ltpi_clk)) ++ return PTR_ERR(priv->ltpi_clk); ++ ++ clk_prepare_enable(priv->ltpi_clk); ++ ++ priv->ltpi_phyclk = devm_clk_get(&pdev->dev, "phy"); ++ if (IS_ERR(priv->ltpi_phyclk)) ++ return PTR_ERR(priv->ltpi_phyclk); ++ ++ clk_prepare_enable(priv->ltpi_phyclk); ++ } else { ++ priv->ltpi_phyclk = NULL; ++ } ++ ++ priv->ltpi_rst = devm_reset_control_get_optional_shared(&pdev->dev, NULL); ++ if 
(IS_ERR(priv->ltpi_rst)) ++ return PTR_ERR(priv->ltpi_rst); ++ ++ reset_control_deassert(priv->ltpi_rst); ++ ++ priv->i2c_tunneling = GENMASK(MAX_I2C_IN_LTPI - 1, 0); ++ if (!of_property_read_u32(np, "i2c-tunneling", &ret)) ++ priv->i2c_tunneling = ret; ++ ++ priv->i2c_timing_0 = LTPI_I2C_100K_0; ++ priv->i2c_timing_1 = LTPI_I2C_100K_1; ++ if (!of_property_read_u32(np, "i2c-tunneling-timing", &ret)) { ++ if (ret == 400) { ++ priv->i2c_timing_0 = LTPI_I2C_400K_0; ++ priv->i2c_timing_1 = LTPI_I2C_400K_1; ++ } ++ } ++ priv->uart_tunneling = GENMASK(MAX_UART_IN_LTPI - 1, 0); ++ if (!of_property_read_u32(np, "uart-tunneling", &ret)) ++ priv->uart_tunneling = ret; ++ ++ priv->scu = syscon_regmap_lookup_by_phandle(np, "aspeed,scu"); ++ if (of_get_property(np, "remote-controller", NULL)) { ++ u32 reg; ++ ++ /* Clear all the pins/otp strap but LTPI related settings for AST1700 */ ++ regmap_read(priv->scu, SCU_IO_PINS_TRAP1, ®); ++ reg &= ~SCU_IO_PINS_TRAP_LTPI; ++ regmap_write(priv->scu, SCU_IO_PINS_TRAP1_CLEAR, reg); ++ ++ regmap_read(priv->scu, SCU_IO_OTP_TRAP1, ®); ++ regmap_write(priv->scu, SCU_IO_OTP_TRAP1_CLEAR, reg); ++ ++ regmap_read(priv->scu, SCU_IO_OTP_TRAP2, ®); ++ regmap_write(priv->scu, SCU_IO_OTP_TRAP2_CLEAR, reg); ++ } else { ++ irq = platform_get_irq(pdev, 0); ++ ret = devm_request_irq(priv->dev, irq, aspeed_ltpi_irq_handler, ++ 0, dev_name(priv->dev), priv); ++ if (ret) { ++ dev_err(priv->dev, "failed to request irq\n"); ++ reset_control_assert(priv->ltpi_rst); ++ clk_disable_unprepare(priv->ltpi_phyclk); ++ clk_disable_unprepare(priv->ltpi_clk); ++ return ret; ++ } ++ ++ writel(LTPI_INTR_EN_OP_LINK_LOST, priv->regs + LTPI_INTR_STATUS); ++ writel(LTPI_INTR_EN_OP_LINK_LOST, priv->regs + LTPI_INTR_EN); ++ } ++ ++ aspeed_ltpi_init_mux(priv); ++ ++ platform_set_drvdata(pdev, priv); ++ if (np) ++ of_platform_populate(np, NULL, lookup, priv->dev); ++ ++ return 0; ++} ++ ++static void aspeed_ltpi_remove(struct platform_device *pdev) ++{ ++ struct aspeed_ltpi_priv *priv; ++ ++ priv = platform_get_drvdata(pdev); ++ reset_control_assert(priv->ltpi_rst); ++ clk_disable_unprepare(priv->ltpi_phyclk); ++ clk_disable_unprepare(priv->ltpi_clk); ++} ++ ++static const struct of_device_id aspeed_ltpi_of_match[] = { ++ { .compatible = "aspeed-ltpi", .data = (const void *)AST2700,}, ++ { .compatible = "aspeed-ast1700-ltpi", .data = (const void *)AST1700,}, ++ { /* sentinel */ } ++}; ++MODULE_DEVICE_TABLE(of, aspeed_ltpi_of_match); ++ ++static struct platform_driver aspeed_ltpi_driver = { ++ .probe = aspeed_ltpi_probe, ++ .remove = aspeed_ltpi_remove, ++ .driver = { ++ .name = KBUILD_MODNAME, ++ .of_match_table = aspeed_ltpi_of_match, ++ }, ++}; ++ ++module_platform_driver(aspeed_ltpi_driver); ++ ++MODULE_DESCRIPTION("LVDS Tunneling Protocol and Interface Bus Driver"); ++MODULE_AUTHOR("Dylan Hung "); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig +--- a/drivers/char/hw_random/Kconfig 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/char/hw_random/Kconfig 2025-12-23 10:16:09.603225801 +0000 +@@ -286,6 +286,7 @@ + config HW_RANDOM_NOMADIK + tristate "ST-Ericsson Nomadik Random Number Generator support" + depends on ARCH_NOMADIK || COMPILE_TEST ++ depends on ARM_AMBA + default HW_RANDOM + help + This driver provides kernel-side support for the Random Number +@@ -587,6 +588,21 @@ + + If unsure, say Y. 
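The HW_RANDOM_ASPEED option added below builds the aspeed-rng platform driver introduced later in this patch, which probes purely by device-tree compatible string; a minimal sketch of the matching node is shown here (the base address and register window are placeholders, not values taken from this patch):

rng: rng@12340000 {
	/* placeholder reg values -- the real base/size come from the SoC dtsi */
	compatible = "aspeed,ast2700-trng";
	reg = <0x12340000 0x100>;
	status = "okay";
};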
+ ++config HW_RANDOM_ASPEED ++ tristate "Aspeed Random Number Generator support" ++ depends on ARCH_ASPEED ++ default HW_RANDOM ++ help ++ This driver provides kernel-side support for the Random Number ++ Generator hardware found on Aspeed ast2600/ast2700 devices. ++ ++ To compile this driver as a module, choose M here: the ++ module will be called aspeed-rng. ++ ++ If unsure, say Y. ++ ++source "drivers/char/hw_random/dwc/Kconfig" ++ + endif # HW_RANDOM + + config UML_RANDOM +diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile +--- a/drivers/char/hw_random/Makefile 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/char/hw_random/Makefile 2025-12-23 10:16:13.771155906 +0000 +@@ -50,3 +50,5 @@ + obj-$(CONFIG_HW_RANDOM_POLARFIRE_SOC) += mpfs-rng.o + obj-$(CONFIG_HW_RANDOM_ROCKCHIP) += rockchip-rng.o + obj-$(CONFIG_HW_RANDOM_JH7110) += jh7110-trng.o ++obj-$(CONFIG_HW_RANDOM_ASPEED) += aspeed-rng.o ++obj-$(CONFIG_HW_RANDOM_DWC) += dwc/ +diff --git a/drivers/char/hw_random/aspeed-rng.c b/drivers/char/hw_random/aspeed-rng.c +--- a/drivers/char/hw_random/aspeed-rng.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/char/hw_random/aspeed-rng.c 2025-12-23 10:16:21.099033088 +0000 +@@ -0,0 +1,134 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright (C) ASPEED Technology Inc. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define TRNG_CTL 0x00 ++#define TRNG_EN 0x0 ++#define TRNG_MODE 0x04 ++#define TRNG_RDY 0x1f ++#define TRNG_ODATA 0x04 ++ ++struct aspeed_trng { ++ u32 ver; ++ void __iomem *base; ++ struct hwrng rng; ++ unsigned int present: 1; ++ ktime_t period; ++ struct hrtimer timer; ++ struct completion completion; ++}; ++ ++static int aspeed_trng_read(struct hwrng *rng, void *buf, size_t max, ++ bool wait) ++{ ++ struct aspeed_trng *priv = container_of(rng, struct aspeed_trng, rng); ++ u32 *data = buf; ++ size_t read = 0; ++ int timeout = max / 4 + 1; ++ ++ while (read < max) { ++ if (!(readl(priv->base + TRNG_CTL) & (1 << TRNG_RDY))) { ++ if (wait) { ++ if (timeout-- == 0) ++ return read; ++ } else { ++ return 0; ++ } ++ } else { ++ *data = readl(priv->base + TRNG_ODATA); ++ data++; ++ read += 4; ++ } ++ } ++ ++ return read; ++} ++ ++static void aspeed_trng_enable(struct aspeed_trng *priv) ++{ ++ u32 ctl; ++ ++ ctl = readl(priv->base + TRNG_CTL); ++ ctl = ctl & ~(1 << TRNG_EN); /* enable rng */ ++ ctl = ctl | (3 << TRNG_MODE); /* select mode */ ++ ++ writel(ctl, priv->base + TRNG_CTL); ++} ++ ++static void aspeed_trng_disable(struct aspeed_trng *priv) ++{ ++ writel(1, priv->base + TRNG_CTL); ++} ++ ++static int aspeed_trng_probe(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ struct aspeed_trng *priv; ++ struct resource *res; ++ int ret; ++ ++ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); ++ if (!priv) ++ return -ENOMEM; ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ priv->base = devm_ioremap_resource(&pdev->dev, res); ++ if (IS_ERR(priv->base)) ++ return PTR_ERR(priv->base); ++ ++ priv->rng.name = pdev->name; ++ priv->rng.quality = 900; ++ priv->rng.read = aspeed_trng_read; ++ ++ aspeed_trng_enable(priv); ++ ++ ret = devm_hwrng_register(&pdev->dev, &priv->rng); ++ if (ret) ++ return ret; ++ ++ platform_set_drvdata(pdev, priv); ++ ++ dev_info(dev, "Aspeed Hardware RNG successfully registered\n"); ++ ++ return 0; ++} ++ ++static void aspeed_trng_remove(struct platform_device *pdev) ++{ ++ struct aspeed_trng *priv = 
platform_get_drvdata(pdev);
++
++	aspeed_trng_disable(priv);
++}
++
++static const struct of_device_id aspeed_trng_dt_ids[] = {
++	{ .compatible = "aspeed,ast2600-trng" },
++	{ .compatible = "aspeed,ast2700-trng" },
++	{}
++};
++MODULE_DEVICE_TABLE(of, aspeed_trng_dt_ids);
++
++static struct platform_driver aspeed_trng_driver = {
++	.probe = aspeed_trng_probe,
++	.remove = aspeed_trng_remove,
++	.driver = {
++		.name = "aspeed-trng",
++		.of_match_table = aspeed_trng_dt_ids,
++	},
++};
++
++module_platform_driver(aspeed_trng_driver);
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Neal Liu ");
++MODULE_DESCRIPTION("Aspeed true random number generator driver");
+diff --git a/drivers/char/hw_random/dwc/Kconfig b/drivers/char/hw_random/dwc/Kconfig
+--- a/drivers/char/hw_random/dwc/Kconfig	1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/char/hw_random/dwc/Kconfig	2025-12-23 10:16:19.523059502 +0000
+@@ -0,0 +1,18 @@
++# SPDX-License-Identifier: GPL-2.0-only
++#
++# DWC Hardware Random Number Generator (RNG) configuration
++#
++
++config HW_RANDOM_DWC
++	tristate "DesignWare Cores HW Random Number Generator support"
++	depends on HW_RANDOM
++	depends on ARCH_ASPEED || COMPILE_TEST
++	help
++	  This driver provides kernel-side support for the DWC
++	  Random Number Generator hardware found on Aspeed SoCs.
++
++	  To compile this driver as a module, choose M here: the
++	  module will be called dwc-rng.
++
++	  If unsure, say Y.
++
+diff --git a/drivers/char/hw_random/dwc/Makefile b/drivers/char/hw_random/dwc/Makefile
+--- a/drivers/char/hw_random/dwc/Makefile	1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/char/hw_random/dwc/Makefile	2025-12-23 10:16:19.523059502 +0000
+@@ -0,0 +1,22 @@
++# SPDX-License-Identifier: GPL-2.0
++
++ccflags-y := -I $(srctree)/drivers/char/hw_random/dwc/src/trng/include \
++	-I $(srctree)/drivers/char/hw_random/dwc/src/pdu/linux/include
++
++obj-$(CONFIG_HW_RANDOM_DWC) += elppdu.o
++elppdu-objs := src/pdu/linux/kernel/pdu.o \
++	src/pdu/common/pdu/pdu_dev32.o \
++
++obj-$(CONFIG_HW_RANDOM_DWC) += elpmem.o
++elpmem-objs := src/pdu/linux/kernel/spacc_mem.o \
++
++obj-$(CONFIG_HW_RANDOM_DWC) += nisttrng.o
++nisttrng-objs := src/trng/kernel/nist_trng.o \
++	src/trng/trng/nist_trng.o \
++	src/trng/trng/nist_trng_private.o \
++
++clean:
++	@find \( -name '*.o' \
++	-o -name '*.a' \
++	-o -name '*.order' \
++	\) -type f -print | xargs rm -rvf
+diff --git a/drivers/char/hw_random/dwc/src/pdu/common/include/elppdu_error.h b/drivers/char/hw_random/dwc/src/pdu/common/include/elppdu_error.h
+--- a/drivers/char/hw_random/dwc/src/pdu/common/include/elppdu_error.h	1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/char/hw_random/dwc/src/pdu/common/include/elppdu_error.h	2025-12-23 10:16:19.524059486 +0000
+@@ -0,0 +1,96 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * This Synopsys software and associated documentation (hereinafter the
++ * "Software") is an unsupported proprietary work of Synopsys, Inc. unless
++ * otherwise expressly agreed to in writing between Synopsys and you. The
++ * Software IS NOT an item of Licensed Software or a Licensed Product under
++ * any End User Software License Agreement or Agreement for Licensed Products
++ * with Synopsys or any supplement thereto. Synopsys is a registered trademark
++ * of Synopsys, Inc. Other names included in the SOFTWARE may be the
++ * trademarks of their respective owners.
++ * ++ * The contents of this file are dual-licensed; you may select either version ++ * 2 of the GNU General Public License ("GPL") or the BSD-3-Clause license ++ * ("BSD-3-Clause"). The GPL is included in the COPYING file accompanying the ++ * SOFTWARE. The BSD License is copied below. ++ * ++ * BSD-3-Clause License: ++ * Copyright (c) 2011-2017 Synopsys, Inc. and/or its affiliates. ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * ++ * 1. Redistributions of source code must retain the above copyright notice, ++ * this list of conditions, and the following disclaimer, without ++ * modification. ++ * ++ * 2. Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * ++ * 3. The names of the above-listed copyright holders may not be used to ++ * endorse or promote products derived from this software without specific ++ * prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#ifndef SYNPDU_ERROR_H_ ++#define SYNPDU_ERROR_H_ ++ ++/* ++ * Common error definitions. Be sure to update pdu_error_code when changing ++ * anything in this list. 
++ */ ++ ++#define CRYPTO_OK (0) ++#define CRYPTO_FAILED (-1) ++#define CRYPTO_INPROGRESS (-2) ++#define CRYPTO_INVALID_HANDLE (-3) ++#define CRYPTO_INVALID_CONTEXT (-4) ++#define CRYPTO_INVALID_SIZE (-5) ++#define CRYPTO_NOT_INITIALIZED (-6) ++#define CRYPTO_NO_MEM (-7) ++#define CRYPTO_INVALID_ALG (-8) ++#define CRYPTO_INVALID_KEY_SIZE (-9) ++#define CRYPTO_INVALID_ARGUMENT (-10) ++#define CRYPTO_MODULE_DISABLED (-11) ++#define CRYPTO_NOT_IMPLEMENTED (-12) ++#define CRYPTO_INVALID_BLOCK_ALIGNMENT (-13) ++#define CRYPTO_INVALID_MODE (-14) ++#define CRYPTO_INVALID_KEY (-15) ++#define CRYPTO_AUTHENTICATION_FAILED (-16) ++#define CRYPTO_INVALID_IV_SIZE (-17) ++#define CRYPTO_MEMORY_ERROR (-18) ++#define CRYPTO_LAST_ERROR (-19) ++#define CRYPTO_HALTED (-20) ++#define CRYPTO_TIMEOUT (-21) ++#define CRYPTO_SRM_FAILED (-22) ++#define CRYPTO_COMMON_ERROR_MAX (-100) ++#define CRYPTO_INVALID_ICV_KEY_SIZE (-100) ++#define CRYPTO_INVALID_PARAMETER_SIZE (-101) ++#define CRYPTO_SEQUENCE_OVERFLOW (-102) ++#define CRYPTO_DISABLED (-103) ++#define CRYPTO_INVALID_VERSION (-104) ++#define CRYPTO_FATAL (-105) ++#define CRYPTO_INVALID_PAD (-106) ++#define CRYPTO_FIFO_FULL (-107) ++#define CRYPTO_INVALID_SEQUENCE (-108) ++#define CRYPTO_INVALID_FIRMWARE (-109) ++#define CRYPTO_NOT_FOUND (-110) ++#define CRYPTO_CMD_FIFO_INACTIVE (-111) ++#define CRYPTO_INVALID_PROTOCOL (-112) ++#define CRYPTO_REPLAY (-113) ++#define CRYPTO_NOT_INSTANTIATED (-114) ++#define CRYPTO_RESEED_REQUIRED (-115) ++ ++#endif +diff --git a/drivers/char/hw_random/dwc/src/pdu/common/pdu/pdu_dev32.c b/drivers/char/hw_random/dwc/src/pdu/common/pdu/pdu_dev32.c +--- a/drivers/char/hw_random/dwc/src/pdu/common/pdu/pdu_dev32.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/char/hw_random/dwc/src/pdu/common/pdu/pdu_dev32.c 2025-12-23 10:16:19.524059486 +0000 +@@ -0,0 +1,165 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * This Synopsys software and associated documentation (hereinafter the ++ * "Software") is an unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. The ++ * Software IS NOT an item of Licensed Software or a Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Products ++ * with Synopsys or any supplement thereto. Synopsys is a registered trademark ++ * of Synopsys, Inc. Other names included in the SOFTWARE may be the ++ * trademarks of their respective owners. ++ * ++ * The contents of this file are dual-licensed; you may select either version ++ * 2 of the GNU General Public License ("GPL") or the BSD-3-Clause license ++ * ("BSD-3-Clause"). The GPL is included in the COPYING file accompanying the ++ * SOFTWARE. The BSD License is copied below. ++ * ++ * BSD-3-Clause License: ++ * Copyright (c) 2011-2017 Synopsys, Inc. and/or its affiliates. ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * ++ * 1. Redistributions of source code must retain the above copyright notice, ++ * this list of conditions, and the following disclaimer, without ++ * modification. ++ * ++ * 2. Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * ++ * 3. 
The names of the above-listed copyright holders may not be used to ++ * endorse or promote products derived from this software without specific ++ * prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#include "elppdu.h" ++ ++void pdu_to_dev32(void *addr_, u32 *src, unsigned long nword) ++{ ++ unsigned char *addr = addr_; ++ ++ while (nword--) { ++ pdu_io_write32(addr, *src++); ++ addr += 4; ++ } ++} ++EXPORT_SYMBOL(pdu_to_dev32); ++ ++void pdu_from_dev32(u32 *dst, void *addr_, unsigned long nword) ++{ ++ unsigned char *addr = addr_; ++ ++ while (nword--) { ++ *dst++ = pdu_io_read32(addr); ++ addr += 4; ++ } ++} ++EXPORT_SYMBOL(pdu_from_dev32); ++ ++void pdu_to_dev32_big(void *addr_, const unsigned char *src, ++ unsigned long nword) ++{ ++ unsigned char *addr = addr_; ++ unsigned long v; ++ ++ while (nword--) { ++ v = 0; ++ v = (v << 8) | ((unsigned long)*src++); ++ v = (v << 8) | ((unsigned long)*src++); ++ v = (v << 8) | ((unsigned long)*src++); ++ v = (v << 8) | ((unsigned long)*src++); ++ pdu_io_write32(addr, v); ++ addr += 4; ++ } ++} ++EXPORT_SYMBOL(pdu_to_dev32_big); ++ ++void pdu_from_dev32_big(unsigned char *dst, void *addr_, unsigned long nword) ++{ ++ unsigned char *addr = addr_; ++ unsigned long v; ++ ++ while (nword--) { ++ v = pdu_io_read32(addr); ++ addr += 4; ++ *dst++ = (v >> 24) & 0xFF; ++ v <<= 8; ++ *dst++ = (v >> 24) & 0xFF; ++ v <<= 8; ++ *dst++ = (v >> 24) & 0xFF; ++ v <<= 8; ++ *dst++ = (v >> 24) & 0xFF; ++ v <<= 8; ++ } ++} ++EXPORT_SYMBOL(pdu_from_dev32_big); ++ ++void pdu_to_dev32_little(void *addr_, const unsigned char *src, ++ unsigned long nword) ++{ ++ unsigned char *addr = addr_; ++ unsigned long v; ++ ++ while (nword--) { ++ v = 0; ++ v = (v >> 8) | ((unsigned long)*src++ << 24UL); ++ v = (v >> 8) | ((unsigned long)*src++ << 24UL); ++ v = (v >> 8) | ((unsigned long)*src++ << 24UL); ++ v = (v >> 8) | ((unsigned long)*src++ << 24UL); ++ pdu_io_write32(addr, v); ++ addr += 4; ++ } ++} ++EXPORT_SYMBOL(pdu_to_dev32_little); ++ ++void pdu_from_dev32_little(unsigned char *dst, void *addr_, unsigned long nword) ++{ ++ unsigned char *addr = addr_; ++ unsigned long v; ++ ++ while (nword--) { ++ v = pdu_io_read32(addr); ++ addr += 4; ++ *dst++ = v & 0xFF; ++ v >>= 8; ++ *dst++ = v & 0xFF; ++ v >>= 8; ++ *dst++ = v & 0xFF; ++ v >>= 8; ++ *dst++ = v & 0xFF; ++ v >>= 8; ++ } ++} ++EXPORT_SYMBOL(pdu_from_dev32_little); ++ ++void pdu_to_dev32_s(void *addr, const unsigned char *src, unsigned long nword, ++ int endian) ++{ ++ if (endian) ++ pdu_to_dev32_big(addr, src, nword); ++ else ++ pdu_to_dev32_little(addr, src, nword); ++} ++EXPORT_SYMBOL(pdu_to_dev32_s); ++ ++void pdu_from_dev32_s(unsigned char *dst, void *addr, unsigned long nword, ++ int endian) ++{ ++ if (endian) ++ 
pdu_from_dev32_big(dst, addr, nword); ++ else ++ pdu_from_dev32_little(dst, addr, nword); ++} ++EXPORT_SYMBOL(pdu_from_dev32_s); +diff --git a/drivers/char/hw_random/dwc/src/pdu/linux/include/elppdu.h b/drivers/char/hw_random/dwc/src/pdu/linux/include/elppdu.h +--- a/drivers/char/hw_random/dwc/src/pdu/linux/include/elppdu.h 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/char/hw_random/dwc/src/pdu/linux/include/elppdu.h 2025-12-23 10:16:19.524059486 +0000 +@@ -0,0 +1,125 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * This Synopsys software and associated documentation (hereinafter the ++ * "Software") is an unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. The ++ * Software IS NOT an item of Licensed Software or a Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Products ++ * with Synopsys or any supplement thereto. Synopsys is a registered trademark ++ * of Synopsys, Inc. Other names included in the SOFTWARE may be the ++ * trademarks of their respective owners. ++ * ++ * The contents of this file are dual-licensed; you may select either version ++ * 2 of the GNU General Public License ("GPL") or the BSD-3-Clause license ++ * ("BSD-3-Clause"). The GPL is included in the COPYING file accompanying the ++ * SOFTWARE. The BSD License is copied below. ++ * ++ * BSD-3-Clause License: ++ * Copyright (c) 2011-2017 Synopsys, Inc. and/or its affiliates. ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * ++ * 1. Redistributions of source code must retain the above copyright notice, ++ * this list of conditions, and the following disclaimer, without ++ * modification. ++ * ++ * 2. Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * ++ * 3. The names of the above-listed copyright holders may not be used to ++ * endorse or promote products derived from this software without specific ++ * prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. 
++ */ ++ ++#ifndef SYNPDU_H_ ++#define SYNPDU_H_ ++ ++/* Platform Specific */ ++#include /* printk() */ ++#include /* size_t */ ++#include /* memcpy()/etc */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#ifndef PDU_BASE_ADDR ++#define PDU_BASE_ADDR 0x14c3b000 ++#endif ++ ++#ifndef PDU_BASE_IRQ ++#define PDU_BASE_IRQ 91 ++#endif ++ ++#define PDU_SINGLE_CORE 1 ++#define PDU_SINGLE_NIST_TRNG 1 ++ ++#if 1 ++#define SYNHW_PRINT printk ++#else ++#define SYNHW_PRINT(...) ++#endif ++ ++#define CPU_YIELD ++#define SYNHW_MEMCPY memcpy ++ ++// Debug modifier for printing, in linux adding KERN_DEBUG makes the output only show up in debug logs (avoids /var/log/messages) ++#define SYNHW_PRINT_DEBUG KERN_DEBUG ++ ++// Locking ++#define PDU_LOCK_TYPE spinlock_t ++#define PDU_INIT_LOCK(lock) spin_lock_init(lock) ++ ++// these are for IRQ contexts ++#define PDU_LOCK(lock, flags) spin_lock_irqsave(lock, flags) ++#define PDU_UNLOCK(lock, flags) spin_unlock_irqrestore(lock, flags) ++ ++// these are for bottom half BH contexts ++#define PDU_LOCK_TYPE_BH struct mutex ++#define PDU_INIT_LOCK_BH(lock) mutex_init(lock) ++#define PDU_LOCK_BH(lock) mutex_lock(lock) ++#define PDU_UNLOCK_BH(lock) mutex_unlock(lock) ++ ++#include "../../common/include/elppdu_error.h" ++ ++void *pdu_linux_map_regs(struct device *dev, struct resource *regs); ++ ++void pdu_io_write32(void *addr, unsigned long val); ++void pdu_io_cached_write32(void *addr, unsigned long val, u32 *cache); ++unsigned long pdu_io_read32(void *addr); ++ ++void pdu_to_dev32(void *addr, u32 *src, unsigned long nword); ++void pdu_from_dev32(u32 *dst, void *addr, unsigned long nword); ++void pdu_to_dev32_big(void *addr, const unsigned char *src, unsigned long nword); ++void pdu_from_dev32_big(unsigned char *dst, void *addr, unsigned long nword); ++void pdu_to_dev32_little(void *addr, const unsigned char *src, unsigned long nword); ++void pdu_from_dev32_little(unsigned char *dst, void *addr, unsigned long nword); ++void pdu_from_dev32_s(unsigned char *dst, void *addr, unsigned long nword, int endian); ++void pdu_to_dev32_s(void *addr, const unsigned char *src, unsigned long nword, int endian); ++ ++void *pdu_malloc(unsigned long n); ++void pdu_free(void *p); ++ ++int pdu_error_code(int code); ++ ++#endif ++ +diff --git a/drivers/char/hw_random/dwc/src/pdu/linux/kernel/pdu.c b/drivers/char/hw_random/dwc/src/pdu/linux/kernel/pdu.c +--- a/drivers/char/hw_random/dwc/src/pdu/linux/kernel/pdu.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/char/hw_random/dwc/src/pdu/linux/kernel/pdu.c 2025-12-23 10:16:19.524059486 +0000 +@@ -0,0 +1,188 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * This Synopsys software and associated documentation (hereinafter the ++ * "Software") is an unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. The ++ * Software IS NOT an item of Licensed Software or a Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Products ++ * with Synopsys or any supplement thereto. Synopsys is a registered trademark ++ * of Synopsys, Inc. Other names included in the SOFTWARE may be the ++ * trademarks of their respective owners. ++ * ++ * The contents of this file are dual-licensed; you may select either version ++ * 2 of the GNU General Public License ("GPL") or the BSD-3-Clause license ++ * ("BSD-3-Clause"). 
The GPL is included in the COPYING file accompanying the ++ * SOFTWARE. The BSD License is copied below. ++ * ++ * BSD-3-Clause License: ++ * Copyright (c) 2011-2017 Synopsys, Inc. and/or its affiliates. ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * ++ * 1. Redistributions of source code must retain the above copyright notice, ++ * this list of conditions, and the following disclaimer, without ++ * modification. ++ * ++ * 2. Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * ++ * 3. The names of the above-listed copyright holders may not be used to ++ * endorse or promote products derived from this software without specific ++ * prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#include ++ ++#include "elppdu.h" ++ ++static bool trace_io; ++module_param(trace_io, bool, 0600); ++MODULE_PARM_DESC(trace_io, "Trace MMIO reads/writes"); ++ ++void *pdu_linux_map_regs(struct device *dev, struct resource *regs) ++{ ++ return devm_ioremap_resource(dev, regs); ++} ++EXPORT_SYMBOL(pdu_linux_map_regs); ++ ++void pdu_io_write32(void *addr, unsigned long val) ++{ ++ if (trace_io) ++ SYNHW_PRINT("PDU: write %.8lx -> %p\n", val, addr); ++ ++ writel(val, addr); ++} ++EXPORT_SYMBOL(pdu_io_write32); ++ ++void pdu_io_cached_write32(void *addr, unsigned long val, uint32_t *cache) ++{ ++ if (*cache == val) { ++ if (trace_io) { ++ SYNHW_PRINT("PDU: write %.8lx -> %p (cached)\n", val, ++ addr); ++ } ++ return; ++ } ++ ++ *cache = val; ++ pdu_io_write32(addr, val); ++} ++EXPORT_SYMBOL(pdu_io_cached_write32); ++ ++unsigned long pdu_io_read32(void *addr) ++{ ++ unsigned long val; ++ ++ val = readl(addr); ++ ++ if (trace_io) ++ SYNHW_PRINT("PDU: read %.8lx <- %p\n", val, addr); ++ ++ return val; ++} ++EXPORT_SYMBOL(pdu_io_read32); ++ ++/* Platform specific memory allocation */ ++void *pdu_malloc(unsigned long n) ++{ ++ return vmalloc(n); ++} ++ ++void pdu_free(void *p) ++{ ++ vfree(p); ++} ++ ++/* Convert SDK error codes to corresponding kernel error codes. 
*/ ++int pdu_error_code(int code) ++{ ++ switch (code) { ++ case CRYPTO_INPROGRESS: ++ return -EINPROGRESS; ++ case CRYPTO_INVALID_HANDLE: ++ case CRYPTO_INVALID_CONTEXT: ++ return -ENXIO; ++ case CRYPTO_NOT_INITIALIZED: ++ return -ENODATA; ++ case CRYPTO_INVALID_SIZE: ++ case CRYPTO_INVALID_ALG: ++ case CRYPTO_INVALID_KEY_SIZE: ++ case CRYPTO_INVALID_ARGUMENT: ++ case CRYPTO_INVALID_BLOCK_ALIGNMENT: ++ case CRYPTO_INVALID_MODE: ++ case CRYPTO_INVALID_KEY: ++ case CRYPTO_INVALID_IV_SIZE: ++ case CRYPTO_INVALID_ICV_KEY_SIZE: ++ case CRYPTO_INVALID_PARAMETER_SIZE: ++ case CRYPTO_REPLAY: ++ case CRYPTO_INVALID_PROTOCOL: ++ case CRYPTO_RESEED_REQUIRED: ++ return -EINVAL; ++ case CRYPTO_NOT_IMPLEMENTED: ++ case CRYPTO_MODULE_DISABLED: ++ return -ENOTSUPP; ++ case CRYPTO_NO_MEM: ++ return -ENOMEM; ++ case CRYPTO_INVALID_PAD: ++ case CRYPTO_INVALID_SEQUENCE: ++ return -EILSEQ; ++ case CRYPTO_MEMORY_ERROR: ++ return -EIO; ++ case CRYPTO_TIMEOUT: ++ return -ETIMEDOUT; ++ case CRYPTO_HALTED: ++ return -ECANCELED; ++ case CRYPTO_AUTHENTICATION_FAILED: ++ case CRYPTO_SEQUENCE_OVERFLOW: ++ case CRYPTO_INVALID_VERSION: ++ return -EPROTO; ++ case CRYPTO_FIFO_FULL: ++ return -EBUSY; ++ case CRYPTO_SRM_FAILED: ++ case CRYPTO_DISABLED: ++ case CRYPTO_LAST_ERROR: ++ return -EAGAIN; ++ case CRYPTO_FAILED: ++ case CRYPTO_FATAL: ++ return -EIO; ++ case CRYPTO_INVALID_FIRMWARE: ++ return -ENOEXEC; ++ case CRYPTO_NOT_FOUND: ++ return -ENOENT; ++ } ++ ++ /* ++ * Any unrecognized code is either success (i.e., zero) or a negative ++ * error code, which may be meaningless but at least will still be ++ * recognized as an error. ++ */ ++ return code; ++} ++EXPORT_SYMBOL(pdu_error_code); ++ ++static int __init pdu_mod_init(void) ++{ ++ return 0; ++} ++ ++static void __exit pdu_mod_exit(void) ++{ ++} ++ ++MODULE_LICENSE("GPL"); ++MODULE_AUTHOR("Synopsys, Inc."); ++module_init(pdu_mod_init); ++module_exit(pdu_mod_exit); +diff --git a/drivers/char/hw_random/dwc/src/pdu/linux/kernel/spacc_mem.c b/drivers/char/hw_random/dwc/src/pdu/linux/kernel/spacc_mem.c +--- a/drivers/char/hw_random/dwc/src/pdu/linux/kernel/spacc_mem.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/char/hw_random/dwc/src/pdu/linux/kernel/spacc_mem.c 2025-12-23 10:16:19.524059486 +0000 +@@ -0,0 +1,191 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * This Synopsys software and associated documentation (hereinafter the ++ * "Software") is an unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. The ++ * Software IS NOT an item of Licensed Software or a Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Products ++ * with Synopsys or any supplement thereto. Synopsys is a registered trademark ++ * of Synopsys, Inc. Other names included in the SOFTWARE may be the ++ * trademarks of their respective owners. ++ * ++ * The contents of this file are dual-licensed; you may select either version ++ * 2 of the GNU General Public License ("GPL") or the BSD-3-Clause license ++ * ("BSD-3-Clause"). The GPL is included in the COPYING file accompanying the ++ * SOFTWARE. The BSD License is copied below. ++ * ++ * BSD-3-Clause License: ++ * Copyright (c) 2011-2016 Synopsys, Inc. and/or its affiliates. ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * ++ * 1. 
Redistributions of source code must retain the above copyright notice, ++ * this list of conditions, and the following disclaimer, without ++ * modification. ++ * ++ * 2. Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * ++ * 3. The names of the above-listed copyright holders may not be used to ++ * endorse or promote products derived from this software without specific ++ * prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "elppdu.h" ++ ++static unsigned long vex_baseaddr = PDU_BASE_ADDR; ++module_param_named(baseaddr, vex_baseaddr, ulong, 0); ++MODULE_PARM_DESC(baseaddr, "Hardware base address (default " __stringify(PDU_BASE_ADDR) ")"); ++ ++// max of 16 devices ++#define MAX_DEV 16 ++ ++static struct platform_device *devices[MAX_DEV]; ++static int dev_id; ++ ++static void register_device(const char *name, int id, ++ const struct resource *res, unsigned int num) ++{ ++ char suffix[16] = ""; ++ struct platform_device_info pdevinfo = { ++ .name = name, ++ .id = id, ++ .res = res, ++ .num_res = num, ++ .dma_mask = 0xffffffff, ++ }; ++ ++ if (dev_id >= MAX_DEV) { ++ pr_err("Too many devices; increase MAX_DEV.\n"); ++ return; ++ } ++ ++ devices[dev_id] = platform_device_register_full(&pdevinfo); ++ if (IS_ERR(devices[dev_id])) { ++ if (id >= 0) ++ snprintf(suffix, sizeof(suffix), ".%d", id); ++ pr_err("Failed to register %s%s\n", name, suffix); ++ ++ devices[dev_id] = NULL; ++ return; ++ } ++ ++ dev_id++; ++} ++ ++static int __init get_irq_num(unsigned int irq_num) ++{ ++ if (IS_ENABLED(CONFIG_ARCH_ZYNQ)) { ++ struct of_phandle_args args = { 0 }; ++ ++ /* ++ * Since this driver is for non-DT use but Zynq uses DT to setup IRQs, ++ * find the GIC by searching for its DT node then manually create the ++ * IRQ mappings. 
++ */ ++ ++ do { ++ args.np = of_find_node_with_property(args.np, ++ "interrupt-controller"); ++ if (!args.np) { ++ pr_err("cannot find IRQ controller"); ++ return -ENODEV; ++ } ++ } while (!of_device_is_compatible(args.np, "arm,cortex-a9-gic")); ++ ++ if (irq_num < 32 || irq_num >= 96) { ++ pr_err("SPI interrupts must be in the range [32,96) on Zynq\n"); ++ return -EINVAL; ++ } ++ ++ args.args_count = 3; ++ args.args[0] = 0; /* SPI */ ++ args.args[1] = irq_num - 32; ++ args.args[2] = 4; /* Active high, level-sensitive */ ++ ++ irq_num = irq_create_of_mapping(&args); ++ of_node_put(args.np); ++ if (irq_num == 0) ++ return -EINVAL; ++ } ++ ++ if (irq_num > INT_MAX) ++ return -EINVAL; ++ ++ return irq_num; ++} ++ ++static int __init pdu_vex_mod_init(void) ++{ ++ int irq_num = get_irq_num(PDU_BASE_IRQ); ++ struct resource res[2]; ++#ifndef PDU_SINGLE_CORE ++ void *pdu_mem; ++ int i, rc; ++#endif ++ ++ if (irq_num >= 0) { ++ res[1] = (struct resource){ ++ .start = irq_num, ++ .end = irq_num, ++ .flags = IORESOURCE_IRQ, ++ }; ++ } else { ++ res[1] = (struct resource){ 0 }; ++ pr_err("IRQ setup failed (error %d), not using IRQs\n", ++ irq_num); ++ } ++ ++#ifdef PDU_SINGLE_BASIC_TRNG ++ res[0] = (struct resource){ ++ .start = vex_baseaddr, ++ .end = vex_baseaddr + 0x80 - 1, ++ .flags = IORESOURCE_MEM, ++ }; ++ register_device("basic_trng", -1, res, 2); ++#endif ++ ++#ifdef PDU_SINGLE_NIST_TRNG ++ res[0] = (struct resource){ ++ .start = vex_baseaddr, ++ .end = vex_baseaddr + 0x800 - 1, ++ .flags = IORESOURCE_MEM, ++ }; ++ register_device("nist_trng", -1, res, 2); ++#endif ++ ++ return 0; ++} ++module_init(pdu_vex_mod_init); ++ ++static void __exit pdu_vex_mod_exit(void) ++{ ++ int i; ++ ++ for (i = 0; i < MAX_DEV; i++) ++ platform_device_unregister(devices[i]); ++} ++module_exit(pdu_vex_mod_exit); ++ ++MODULE_LICENSE("GPL"); ++MODULE_AUTHOR("Synopsys, Inc."); +diff --git a/drivers/char/hw_random/dwc/src/trng/include/nisttrng.h b/drivers/char/hw_random/dwc/src/trng/include/nisttrng.h +--- a/drivers/char/hw_random/dwc/src/trng/include/nisttrng.h 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/char/hw_random/dwc/src/trng/include/nisttrng.h 2025-12-23 10:16:19.524059486 +0000 +@@ -0,0 +1,63 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * This Synopsys software and associated documentation (hereinafter the ++ * "Software") is an unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. The ++ * Software IS NOT an item of Licensed Software or a Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Products ++ * with Synopsys or any supplement thereto. Synopsys is a registered trademark ++ * of Synopsys, Inc. Other names included in the SOFTWARE may be the ++ * trademarks of their respective owners. ++ * ++ * The contents of this file are dual-licensed; you may select either version ++ * 2 of the GNU General Public License ("GPL") or the BSD-3-Clause license ++ * ("BSD-3-Clause"). The GPL is included in the COPYING file accompanying the ++ * SOFTWARE. The BSD License is copied below. ++ * ++ * BSD-3-Clause License: ++ * Copyright (c) 2012-2016 Synopsys, Inc. and/or its affiliates. ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * ++ * 1. 
Redistributions of source code must retain the above copyright notice, ++ * this list of conditions, and the following disclaimer, without ++ * modification. ++ * ++ * 2. Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * ++ * 3. The names of the above-listed copyright holders may not be used to ++ * endorse or promote products derived from this software without specific ++ * prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#ifndef NISTTRNG_H ++#define NISTTRNG_H ++ ++#include "synversion.h" ++#include "elppdu.h" ++#include "nisttrng_hw.h" ++#include "nisttrng_common.h" ++#include "nisttrng_private.h" ++ ++int nisttrng_init(struct nist_trng_state *state, u32 *base); ++int nisttrng_instantiate(struct nist_trng_state *state, int req_sec_strength, int pred_resist, void *personal_str); ++int nisttrng_uninstantiate(struct nist_trng_state *state); ++int nisttrng_reseed(struct nist_trng_state *state, int pred_resist, void *addin_str); ++int nisttrng_generate(struct nist_trng_state *state, void *random_bits, unsigned long req_num_bytes, int req_sec_strength, int pred_resist, void *addin_str); ++int nisttrng_rbc(struct nist_trng_state *state, int enable, int rbc_num, int rate, int urun_blnk); ++int nisttrng_generate_public_vtrng(struct nist_trng_state *state, void *random_bits, unsigned long req_num_bytes, int vtrng); ++#endif +diff --git a/drivers/char/hw_random/dwc/src/trng/include/nisttrng_common.h b/drivers/char/hw_random/dwc/src/trng/include/nisttrng_common.h +--- a/drivers/char/hw_random/dwc/src/trng/include/nisttrng_common.h 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/char/hw_random/dwc/src/trng/include/nisttrng_common.h 2025-12-23 10:16:19.524059486 +0000 +@@ -0,0 +1,144 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++// ------------------------------------------------------------------------ ++// ++// (C) COPYRIGHT 2012 - 2016 SYNOPSYS, INC. ++// ALL RIGHTS RESERVED ++// ++// (C) COPYRIGHT 2012-2016 Synopsys, Inc. ++// This Synopsys software and all associated documentation are ++// proprietary to Synopsys, Inc. and may only be used pursuant ++// to the terms and conditions of a written license agreement ++// with Synopsys, Inc. All other use, reproduction, modification, ++// or distribution of the Synopsys software or the associated ++// documentation is strictly prohibited. 
++// ++// ------------------------------------------------------------------------ ++ ++#ifndef NISTTRNG_COMMON_H ++#define NISTTRNG_COMMON_H ++ ++#define NIST_TRNG_RETRY_MAX 5000000UL ++ ++#define NIST_DFLT_MAX_BITS_PER_REQ BIT(19) ++#define NIST_DFLT_MAX_REQ_PER_SEED BIT(48) ++ ++/* Do not change the following parameters */ ++#define NIST_TRNG_DFLT_MAX_REJECTS 10 ++ ++#define DEBUG(...) ++//#define DEBUG(...) printk(__VA_ARGS__) ++ ++enum nisttrng_sec_strength { ++ SEC_STRNT_AES128 = 0, ++ SEC_STRNT_AES256 = 1 ++}; ++ ++enum nisttrng_drbg_arch { ++ AES128 = 0, ++ AES256 = 1 ++}; ++ ++enum nisttrng_current_state { ++ NIST_TRNG_STATE_INITIALIZE = 0, ++ NIST_TRNG_STATE_UNINSTANTIATE, ++ NIST_TRNG_STATE_INSTANTIATE, ++ NIST_TRNG_STATE_RESEED, ++ NIST_TRNG_STATE_GENERATE ++}; ++ ++struct nist_trng_state { ++ u32 *base; ++ ++ /* Hardware features and build ID */ ++ struct { ++ struct { ++ enum nisttrng_drbg_arch drbg_arch; ++ unsigned int extra_ps_present, ++ secure_rst_state, ++ diag_level_basic_trng, ++ diag_level_stat_hlt, ++ diag_level_ns; ++ } features; ++ ++ struct { ++ unsigned int ext_enum, ++ ext_ver, ++ rel_num; ++ } corekit_rel; ++ ++ struct { ++ unsigned int core_type, ++ bg8, ++ cdc_synch_depth, ++ background_noise, ++ edu_present, ++ aes_datapath, ++ aes_max_key_size, ++ personilzation_str; ++ } build_cfg0; ++ ++ struct { ++ unsigned int num_raw_noise_blks, ++ sticky_startup, ++ auto_correlation_test, ++ mono_bit_test, ++ run_test, ++ poker_test, ++ raw_ht_adap_test, ++ raw_ht_rep_test, ++ ent_src_rep_smpl_size, ++ ent_src_rep_test, ++ ent_src_rep_min_entropy; ++ } build_cfg1; ++ ++ struct { ++ unsigned int rbc2_rate_width, ++ rbc1_rate_width, ++ rbc0_rate_width, ++ public_vtrng_channels, ++ esm_channel, ++ rbc_channels, ++ fifo_depth; ++ } edu_build_cfg0; ++ } config; ++ ++ /* status */ ++ struct { ++ //nist_trng_current_state current_state; ++ enum nisttrng_current_state current_state; // old for now ++ unsigned int nonce_mode, ++ secure_mode, ++ pred_resist; ++ //nist_trng_sec_strength sec_strength; ++ enum nisttrng_sec_strength sec_strength; ++ unsigned int pad_ps_addin; ++ unsigned int alarm_code; ++ // Private VTRNG STAT, all the public trng will have the same STAT as public TRNG in terms of ++ // rnc_enabled and seed_enum ++ struct { ++ unsigned int seed_enum, ++ rnc_enabled; ++ } edu_vstat; ++ } status; ++ ++ /* reminders and alarms */ ++ struct { ++ unsigned long max_bits_per_req; ++ unsigned long long max_req_per_seed; ++ unsigned long bits_per_req_left; ++ unsigned long long req_per_seed_left; ++ } counters; ++}; ++ ++#define nist_trng_zero_status(x) \ ++ memset(&((x)->status), 0, sizeof((x)->status)) ++ ++#define DRBG_INSTANTIATED(cs) \ ++ ((((cs) == NIST_TRNG_STATE_INSTANTIATE) || \ ++ ((cs) == NIST_TRNG_STATE_RESEED) || \ ++ ((cs) == NIST_TRNG_STATE_GENERATE)) ? 1 : 0) ++ ++#define REQ_SEC_STRENGTH_IS_VALID(sec_st) \ ++ ((((sec_st) > 0) && ((sec_st) <= 256)) ? 1 : 0) ++ ++#endif +diff --git a/drivers/char/hw_random/dwc/src/trng/include/nisttrng_hw.h b/drivers/char/hw_random/dwc/src/trng/include/nisttrng_hw.h +--- a/drivers/char/hw_random/dwc/src/trng/include/nisttrng_hw.h 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/char/hw_random/dwc/src/trng/include/nisttrng_hw.h 2025-12-23 10:16:19.524059486 +0000 +@@ -0,0 +1,457 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * This Synopsys software and associated documentation (hereinafter the ++ * "Software") is an unsupported proprietary work of Synopsys, Inc. 
unless ++ * otherwise expressly agreed to in writing between Synopsys and you. The ++ * Software IS NOT an item of Licensed Software or a Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Products ++ * with Synopsys or any supplement thereto. Synopsys is a registered trademark ++ * of Synopsys, Inc. Other names included in the SOFTWARE may be the ++ * trademarks of their respective owners. ++ * ++ * The contents of this file are dual-licensed; you may select either version ++ * 2 of the GNU General Public License ("GPL") or the BSD-3-Clause license ++ * ("BSD-3-Clause"). The GPL is included in the COPYING file accompanying the ++ * SOFTWARE. The BSD License is copied below. ++ * ++ * BSD-3-Clause License: ++ * Copyright (c) 2012-2016 Synopsys, Inc. and/or its affiliates. ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * ++ * 1. Redistributions of source code must retain the above copyright notice, ++ * this list of conditions, and the following disclaimer, without ++ * modification. ++ * ++ * 2. Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * ++ * 3. The names of the above-listed copyright holders may not be used to ++ * endorse or promote products derived from this software without specific ++ * prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. 
++ */ ++ ++#ifndef NISTTRNG_HW_H ++#define NISTTRNG_HW_H ++ ++/* HW related Parameters */ ++#define NIST_TRNG_RAND_BLK_SIZE_BITS 128 ++#define CHX_URUN_BLANK_AFTER_RESET 0x3 ++ ++/* registers */ ++#define NIST_TRNG_REG_CTRL 0x00 ++#define NIST_TRNG_REG_MODE 0x01 ++#define NIST_TRNG_REG_SMODE 0x02 ++#define NIST_TRNG_REG_STAT 0x03 ++#define NIST_TRNG_REG_IE 0x04 ++#define NIST_TRNG_REG_ISTAT 0x05 ++#define NIST_TRNG_REG_ALARM 0x06 ++#define NIST_TRNG_REG_COREKIT_REL 0x07 ++#define NIST_TRNG_REG_FEATURES 0x08 ++#define NIST_TRNG_REG_RAND0 0x09 ++#define NIST_TRNG_REG_RAND1 0x0A ++#define NIST_TRNG_REG_RAND2 0x0B ++#define NIST_TRNG_REG_RAND3 0x0C ++#define NIST_TRNG_REG_NPA_DATA0 0x0D ++#define NIST_TRNG_REG_NPA_DATA1 0x0E ++#define NIST_TRNG_REG_NPA_DATA2 0x0F ++#define NIST_TRNG_REG_NPA_DATA3 0x10 ++#define NIST_TRNG_REG_NPA_DATA4 0x11 ++#define NIST_TRNG_REG_NPA_DATA5 0x12 ++#define NIST_TRNG_REG_NPA_DATA6 0x13 ++#define NIST_TRNG_REG_NPA_DATA7 0x14 ++#define NIST_TRNG_REG_NPA_DATA8 0x15 ++#define NIST_TRNG_REG_NPA_DATA9 0x16 ++#define NIST_TRNG_REG_NPA_DATA10 0x17 ++#define NIST_TRNG_REG_NPA_DATA11 0x18 ++#define NIST_TRNG_REG_NPA_DATA12 0x19 ++#define NIST_TRNG_REG_NPA_DATA13 0x1A ++#define NIST_TRNG_REG_NPA_DATA14 0x1B ++#define NIST_TRNG_REG_NPA_DATA15 0x1C ++#define NIST_TRNG_REG_SEED0 0x1D ++#define NIST_TRNG_REG_SEED1 0x1E ++#define NIST_TRNG_REG_SEED2 0x1F ++#define NIST_TRNG_REG_SEED3 0x20 ++#define NIST_TRNG_REG_SEED4 0x21 ++#define NIST_TRNG_REG_SEED5 0x22 ++#define NIST_TRNG_REG_SEED6 0x23 ++#define NIST_TRNG_REG_SEED7 0x24 ++#define NIST_TRNG_REG_SEED8 0x25 ++#define NIST_TRNG_REG_SEED9 0x26 ++#define NIST_TRNG_REG_SEED10 0x27 ++#define NIST_TRNG_REG_SEED11 0x28 ++#define NIST_TRNG_REG_TIME_TO_SEED 0x34 ++#define NIST_TRNG_REG_IA_RDATA 0x38 ++#define NIST_TRNG_REG_IA_WDATA 0x39 ++#define NIST_TRNG_REG_IA_ADDR 0x3A ++#define NIST_TRNG_REG_IA_CMD 0x3B ++#define NIST_TRNG_REG_BUILD_CFG0 0x3C ++#define NIST_TRNG_REG_BUILD_CFG1 0x3D ++ ++/* nist edu registers */ ++#define NIST_TRNG_EDU_RNC_CTRL 0x100 ++#define NIST_TRNG_EDU_FLUSH_CTRL 0x101 ++#define NIST_TRNG_EDU_RESEED_CNTR 0x102 ++#define NIST_TRNG_EDU_RBC_CTRL 0x104 ++#define NIST_TRNG_EDU_STAT 0x106 ++#define NIST_TRNG_EDU_IE 0x108 ++#define NIST_TRNG_EDU_ISTAT 0x109 ++#define NIST_TRNG_EDU_BUILD_CFG0 0x12C ++#define NIST_TRNG_EDU_VCTRL 0x138 ++#define NIST_TRNG_EDU_VSTAT 0x139 ++#define NIST_TRNG_EDU_VIE 0x13A ++#define NIST_TRNG_EDU_VISTAT 0x13B ++#define NIST_TRNG_EDU_VRAND_0 0x13C ++#define NIST_TRNG_EDU_VRAND_1 0x13D ++#define NIST_TRNG_EDU_VRAND_2 0x13E ++#define NIST_TRNG_EDU_VRAND_3 0x13F ++ ++/* edu vtrng registers */ ++#define NIST_TRNG_EDU_VTRNG_VCTRL0 0x180 ++#define NIST_TRNG_EDU_VTRNG_VSTAT0 0x181 ++#define NIST_TRNG_EDU_VTRNG_VIE0 0x182 ++#define NIST_TRNG_EDU_VTRNG_VISTAT0 0x183 ++#define NIST_TRNG_EDU_VTRNG_VRAND0_0 0x184 ++#define NIST_TRNG_EDU_VTRNG_VRAND0_1 0x185 ++#define NIST_TRNG_EDU_VTRNG_VRAND0_2 0x186 ++#define NIST_TRNG_EDU_VTRNG_VRAND0_3 0x187 ++#define NIST_TRNG_EDU_VTRNG_VCTRL1 0x188 ++#define NIST_TRNG_EDU_VTRNG_VSTAT1 0x189 ++#define NIST_TRNG_EDU_VTRNG_VIE1 0x18A ++#define NIST_TRNG_EDU_VTRNG_VISTAT1 0x18B ++#define NIST_TRNG_EDU_VTRNG_VRAND1_0 0x18C ++#define NIST_TRNG_EDU_VTRNG_VRAND1_1 0x18D ++#define NIST_TRNG_EDU_VTRNG_VRAND1_2 0x18E ++#define NIST_TRNG_EDU_VTRNG_VRAND1_3 0x18F ++#define NIST_TRNG_EDU_VTRNG_VCTRL2 0x190 ++#define NIST_TRNG_EDU_VTRNG_VSTAT2 0x191 ++#define NIST_TRNG_EDU_VTRNG_VIE2 0x192 ++#define NIST_TRNG_EDU_VTRNG_VISTAT2 0x193 ++#define NIST_TRNG_EDU_VTRNG_VRAND2_0 
0x194 ++#define NIST_TRNG_EDU_VTRNG_VRAND2_1 0x195 ++#define NIST_TRNG_EDU_VTRNG_VRAND2_2 0x196 ++#define NIST_TRNG_EDU_VTRNG_VRAND2_3 0x197 ++#define NIST_TRNG_EDU_VTRNG_VCTRL3 0x198 ++#define NIST_TRNG_EDU_VTRNG_VSTAT3 0x199 ++#define NIST_TRNG_EDU_VTRNG_VIE3 0x19A ++#define NIST_TRNG_EDU_VTRNG_VISTAT3 0x19B ++#define NIST_TRNG_EDU_VTRNG_VRAND3_0 0x19C ++#define NIST_TRNG_EDU_VTRNG_VRAND3_1 0x19D ++#define NIST_TRNG_EDU_VTRNG_VRAND3_2 0x19E ++#define NIST_TRNG_EDU_VTRNG_VRAND3_3 0x19F ++#define NIST_TRNG_EDU_VTRNG_VCTRL4 0x1A0 ++#define NIST_TRNG_EDU_VTRNG_VSTAT4 0x1A1 ++#define NIST_TRNG_EDU_VTRNG_VIE4 0x1A2 ++#define NIST_TRNG_EDU_VTRNG_VISTAT4 0x1A3 ++#define NIST_TRNG_EDU_VTRNG_VRAND4_0 0x1A4 ++#define NIST_TRNG_EDU_VTRNG_VRAND4_1 0x1A5 ++#define NIST_TRNG_EDU_VTRNG_VRAND4_2 0x1A6 ++#define NIST_TRNG_EDU_VTRNG_VRAND4_3 0x1A7 ++#define NIST_TRNG_EDU_VTRNG_VCTRL5 0x1A8 ++#define NIST_TRNG_EDU_VTRNG_VSTAT5 0x1A9 ++#define NIST_TRNG_EDU_VTRNG_VIE5 0x1AA ++#define NIST_TRNG_EDU_VTRNG_VISTAT5 0x1AB ++#define NIST_TRNG_EDU_VTRNG_VRAND5_0 0x1AC ++#define NIST_TRNG_EDU_VTRNG_VRAND5_1 0x1AD ++#define NIST_TRNG_EDU_VTRNG_VRAND5_2 0x1AE ++#define NIST_TRNG_EDU_VTRNG_VRAND5_3 0x1AF ++#define NIST_TRNG_EDU_VTRNG_VCTRL6 0x1B0 ++#define NIST_TRNG_EDU_VTRNG_VSTAT6 0x1B1 ++#define NIST_TRNG_EDU_VTRNG_VIE6 0x1B2 ++#define NIST_TRNG_EDU_VTRNG_VISTAT6 0x1B3 ++#define NIST_TRNG_EDU_VTRNG_VRAND6_0 0x1B4 ++#define NIST_TRNG_EDU_VTRNG_VRAND6_1 0x1B5 ++#define NIST_TRNG_EDU_VTRNG_VRAND6_2 0x1B6 ++#define NIST_TRNG_EDU_VTRNG_VRAND6_3 0x1B7 ++#define NIST_TRNG_EDU_VTRNG_VCTRL7 0x1B8 ++#define NIST_TRNG_EDU_VTRNG_VSTAT7 0x1B9 ++#define NIST_TRNG_EDU_VTRNG_VIE7 0x1BA ++#define NIST_TRNG_EDU_VTRNG_VISTAT7 0x1BB ++#define NIST_TRNG_EDU_VTRNG_VRAND7_0 0x1BC ++#define NIST_TRNG_EDU_VTRNG_VRAND7_1 0x1BD ++#define NIST_TRNG_EDU_VTRNG_VRAND7_2 0x1BE ++#define NIST_TRNG_EDU_VTRNG_VRAND7_3 0x1BF ++ ++#define NIST_TRNG_EDU_VTRNG_VCTRL_CMD_NOP 0x0 ++#define NIST_TRNG_EDU_VTRNG_VCTRL_CMD_GET_RANDOM 0x1 ++#define NIST_TRNG_EDU_VTRNG_VCTRL_CMD_INIT 0x2 ++ ++#define NIST_TRNG_EDU_VTRNG_VCTRL_CMD_MASK 0x3Ul ++#define NIST_TRNG_EDU_VTRNG_VCTRL_CMD_SET(y, x) (((y) & ~(NIST_TRNG_EDU_VTRNG_VCTRL_CMD_MASK)) | ((x))) ++ ++/* CTRL */ ++#define NIST_TRNG_REG_CTRL_CMD_NOP 0 ++#define NIST_TRNG_REG_CTRL_CMD_GEN_NOISE 1 ++#define NIST_TRNG_REG_CTRL_CMD_GEN_NONCE 2 ++#define NIST_TRNG_REG_CTRL_CMD_CREATE_STATE 3 ++#define NIST_TRNG_REG_CTRL_CMD_RENEW_STATE 4 ++#define NIST_TRNG_REG_CTRL_CMD_REFRESH_ADDIN 5 ++#define NIST_TRNG_REG_CTRL_CMD_GEN_RANDOM 6 ++#define NIST_TRNG_REG_CTRL_CMD_ADVANCE_STATE 7 ++#define NIST_TRNG_REG_CTRL_CMD_KAT 8 ++#define NIST_TRNG_REG_CTRL_CMD_ZEROIZE 15 ++ ++/* EDU CTRL */ ++#define NIST_TRNG_EDU_RNC_CTRL_CMD_RNC_DISABLE_TO_HOLD 0 ++#define NIST_TRNG_EDU_RNC_CTRL_CMD_RNC_ENABLE 1 ++#define NIST_TRNG_EDU_RNC_CTRL_CMD_RNC_DISABLE_TO_IDLE 2 ++#define NIST_TRNG_EDU_RNC_CTRL_CMD_RNC_FINISH_TO_IDLE 3 ++ ++#define NIST_TRNG_EDU_RNC_CTRL_CMD_MASK 0x3Ul ++#define NIST_TRNG_EDU_RNC_CTRL_CMD_SET(y, x) (((y) & ~(NIST_TRNG_EDU_RNC_CTRL_CMD_MASK)) | ((x))) ++ ++/* EDU_FLUSH_CTRL */ ++#define _NIST_TRNG_EDU_FLUSH_CTRL_CH2_RBC 3 ++#define _NIST_TRNG_EDU_FLUSH_CTRL_CH1_RBC 2 ++#define _NIST_TRNG_EDU_FLUSH_CTRL_CH0_RBC 1 ++#define _NIST_TRNG_EDU_FLUSH_CTRL_FIFO 0 ++ ++#define NIST_TRNG_EDU_FLUSH_CTRL_CH2_RBC BIT(_NIST_TRNG_EDU_FLUSH_CTRL_CH2_RBC) ++#define NIST_TRNG_EDU_FLUSH_CTRL_CH1_RBC BIT(_NIST_TRNG_EDU_FLUSH_CTRL_CH1_RBC) ++#define NIST_TRNG_EDU_FLUSH_CTRL_CH0_RBC BIT(_NIST_TRNG_EDU_FLUSH_CTRL_CH0_RBC) ++#define 
NIST_TRNG_EDU_FLUSH_CTRL_FIFO BIT(_NIST_TRNG_EDU_FLUSH_CTRL_FIFO) ++ ++/*EDU_RBC_CTRL*/ ++#define _NIST_TRNG_EDU_RBC_CTRL_CH2_URUN_BLANK 28 ++#define _NIST_TRNG_EDU_RBC_CTRL_CH1_URUN_BLANK 26 ++#define _NIST_TRNG_EDU_RBC_CTRL_CH0_URUN_BLANK 24 ++#define _NIST_TRNG_EDU_RBC_CTRL_CH2_RATE 16 ++#define _NIST_TRNG_EDU_RBC_CTRL_CH1_RATE 8 ++#define _NIST_TRNG_EDU_RBC_CTRL_CH0_RATE 0 ++ ++#define _NIST_TRNG_EDU_RBC_CTRL_CH_RATE_MASK 0xFUL ++#define _NIST_TRNG_EDU_RBC_CTRL_CH_URUN_BLANK_MASK 0x3UL ++ ++#define NISTTRNG_EDU_RBC_CTRL_SET_CH_RATE(z, y, x) (((y) & ~(_NIST_TRNG_EDU_RBC_CTRL_CH_RATE_MASK << (x))) | ((z) << (x))) ++#define NISTTRNG_EDU_RBC_CTRL_SET_CH_URUN_BLANK(z, y, x) (((y) & ~(_NIST_TRNG_EDU_RBC_CTRL_CH_URUN_BLANK_MASK << (x))) | ((z) << (x))) ++ ++#define NISTTRNG_EDU_RBC_CTRL_GET_CH_RATE(y, x) ((_NIST_TRNG_EDU_RBC_CTRL_CH_RATE_MASK) & ((y) >> (x))) ++#define NISTTRNG_EDU_RBC_CTRL_GET_CH_URUN_BLANK(y, x) ((_NIST_TRNG_EDU_RBC_CTRL_CH_URUN_BLANK_MASK) & ((y) >> (x))) ++ ++#define NISTTRNG_EDU_RBC_CTRL_GET_CH_RATE_AFTER_RESET 0x0 ++#define NISTTRNG_EDU_RBC_CTRL_SET_CH_URUN_BLANK_AFTER_RESET 0x3 ++ ++/* MODE */ ++#define _NIST_TRNG_REG_MODE_KAT_SEL 7 ++#define _NIST_TRNG_REG_MODE_KAT_VEC 5 ++#define _NIST_TRNG_REG_MODE_ADDIN_PRESENT 4 ++#define _NIST_TRNG_REG_MODE_PRED_RESIST 3 ++#define _NIST_TRNG_REG_MODE_SEC_ALG 0 ++ ++#define NIST_TRNG_REG_MODE_ADDIN_PRESENT BIT(_NIST_TRNG_REG_MODE_ADDIN_PRESENT) ++#define NIST_TRNG_REG_MODE_PRED_RESIST BIT(_NIST_TRNG_REG_MODE_PRED_RESIST) ++#define NIST_TRNG_REG_MODE_SEC_ALG BIT(_NIST_TRNG_REG_MODE_SEC_ALG) ++ ++/* SMODE */ ++#define _NIST_TRNG_REG_SMODE_NOISE_COLLECT 31 ++#define _NIST_TRNG_REG_SMODE_INDIV_HT_DISABLE 16 ++#define _NIST_TRNG_REG_SMODE_MAX_REJECTS 2 ++#define _NIST_TRNG_REG_SMODE_MISSION_MODE 1 ++#define _NIST_TRNG_REG_SMODE_SECURE_EN _NIST_TRNG_REG_SMODE_MISSION_MODE ++#define _NIST_TRNG_REG_SMODE_NONCE 0 ++ ++#define NIST_TRNG_REG_SMODE_MAX_REJECTS(x) ((x) << _NIST_TRNG_REG_SMODE_MAX_REJECTS) ++#define NIST_TRNG_REG_SMODE_SECURE_EN(x) ((x) << _NIST_TRNG_REG_SMODE_SECURE_EN) ++#define NIST_TRNG_REG_SMODE_NONCE BIT(_NIST_TRNG_REG_SMODE_NONCE) ++ ++/* STAT */ ++#define _NIST_TRNG_REG_STAT_BUSY 31 ++#define _NIST_TRNG_REG_STAT_STARTUP_TEST_IN_PROG 10 ++#define _NIST_TRNG_REG_STAT_STARTUP_TEST_STUCK 9 ++#define _NIST_TRNG_REG_STAT_DRBG_STATE 7 ++#define _NIST_TRNG_REG_STAT_SECURE 6 ++#define _NIST_TRNG_REG_STAT_NONCE_MODE 5 ++#define _NIST_TRNG_REG_STAT_SEC_ALG 4 ++#define _NIST_TRNG_REG_STAT_LAST_CMD 0 ++ ++#define NIST_TRNG_REG_STAT_BUSY BIT(_NIST_TRNG_REG_STAT_BUSY) ++//#define NIST_TRNG_REG_STAT_DRBG_STATE (1UL<<_NIST_TRNG_REG_STAT_DRBG_STATE) ++//#define NIST_TRNG_REG_STAT_SECURE (1UL << _NIST_TRNG_REG_STAT_SECURE) ++//#define NIST_TRNG_REG_STAT_NONCE_MODE (1UL << _NIST_TRNG_REG_STAT_NONCE_MODE) ++//#define NIST_TRNG_REG_STAT_SEC_ALG (1UL << _NIST_TRNG_REG_STAT_SEC_ALG) ++//#define NIST_TRNG_REG_STAT_LAST_CMD(x) (((x) >> _NIST_TRNG_REG_STAT_LAST_CMD)&0xF) ++ ++/*EDU_STAT*/ ++ ++#define NIST_TRNG_EDU_STAT_FIFO_LEVEL(x) (((x) >> 24) & 255) ++#define NIST_TRNG_EDU_STAT_TTT_INDEX(x) (((x) >> 16) & 255) ++#define NIST_TRNG_EDU_STAT_RNC_BUSY(x) (((x) >> 3) & 7) ++#define NIST_TRNG_EDU_STAT_RNC_ENABLED(x) (((x) >> 2) & 1) ++#define NIST_TRNG_EDU_STAT_FIFO_EMPTY(x) (((x) >> 1) & 1) ++#define NIST_TRNG_EDU_STAT_FIFO_FULL(x) ((x) & 1) ++ ++/* IE */ ++#define _NIST_TRNG_REG_IE_GLBL 31 ++#define _NIST_TRNG_REG_IE_DONE 4 ++#define _NIST_TRNG_REG_IE_ALARMS 3 ++#define _NIST_TRNG_REG_IE_NOISE_RDY 2 ++#define _NIST_TRNG_REG_IE_KAT_COMPLETE 1 
++#define _NIST_TRNG_REG_IE_ZEROIZE 0 ++ ++#define NIST_TRNG_REG_IE_GLBL BIT(_NIST_TRNG_REG_IE_GLBL) ++#define NIST_TRNG_REG_IE_DONE BIT(_NIST_TRNG_REG_IE_DONE) ++#define NIST_TRNG_REG_IE_ALARMS BIT(_NIST_TRNG_REG_IE_ALARMS) ++#define NIST_TRNG_REG_IE_NOISE_RDY BIT(_NIST_TRNG_REG_IE_NOISE_RDY) ++#define NIST_TRNG_REG_IE_KAT_COMPLETE BIT(_NIST_TRNG_REG_IE_KAT_COMPLETE) ++#define NIST_TRNG_REG_IE_ZEROIZE BIT(_NIST_TRNG_REG_IE_ZEROIZE) ++ ++/* ISTAT */ ++#define _NIST_TRNG_REG_ISTAT_DONE 4 ++#define _NIST_TRNG_REG_ISTAT_ALARMS 3 ++#define _NIST_TRNG_REG_ISTAT_NOISE_RDY 2 ++#define _NIST_TRNG_REG_ISTAT_KAT_COMPLETE 1 ++#define _NIST_TRNG_REG_ISTAT_ZEROIZE 0 ++ ++#define NIST_TRNG_REG_ISTAT_DONE BIT(_NIST_TRNG_REG_ISTAT_DONE) ++#define NIST_TRNG_REG_ISTAT_ALARMS BIT(_NIST_TRNG_REG_ISTAT_ALARMS) ++#define NIST_TRNG_REG_ISTAT_NOISE_RDY BIT(_NIST_TRNG_REG_ISTAT_NOISE_RDY) ++#define NIST_TRNG_REG_ISTAT_KAT_COMPLETE BIT(_NIST_TRNG_REG_ISTAT_KAT_COMPLETE) ++#define NIST_TRNG_REG_ISTAT_ZEROIZE BIT(_NIST_TRNG_REG_ISTAT_ZEROIZE) ++ ++/*EDU_ISTAT*/ ++ ++#define _NIST_TRNG_EDU_ISTAT_CH2_RBC_URUN 8 ++#define _NIST_TRNG_EDU_ISTAT_CH1_RBC_URUN 7 ++#define _NIST_TRNG_EDU_ISTAT_CH0_RBC_URUN 6 ++#define _NIST_TRNG_EDU_ISTAT_PRIVATE_VTRNG 5 ++#define _NIST_TRNG_EDU_ISTAT_WAIT_EXP_TIMEOUT 4 ++#define _NIST_TRNG_EDU_ISTAT_RNC_DRVN_OFFLINE 3 ++#define _NIST_TRNG_EDU_ISTAT_FIFO_URUN 2 ++#define _NIST_TRNG_EDU_ISTAT_ACCESS_VIOL 1 ++#define _NIST_TRNG_EDU_ISTAT_RESEED_REMINDER 0 ++ ++#define NIST_TRNG_EDU_ISTAT_CH2_RBC_URUN BIT(_NIST_TRNG_EDU_ISTAT_CH2_RBC_URUN) ++#define NIST_TRNG_EDU_ISTAT_CH1_RBC_URUN BIT(_NIST_TRNG_EDU_ISTAT_CH1_RBC_URUN) ++#define NIST_TRNG_EDU_ISTAT_CH0_RBC_URUN BIT(_NIST_TRNG_EDU_ISTAT_CH0_RBC_URUN) ++#define NIST_TRNG_EDU_ISTAT_PRIVATE_VTRNG BIT(_NIST_TRNG_EDU_ISTAT_PRIVATE_VTRNG) ++#define NIST_TRNG_EDU_ISTAT_WAIT_EXP_TIMEOUT BIT(_NIST_TRNG_EDU_ISTAT_WAIT_EXP_TIMEOUT) ++#define NIST_TRNG_EDU_ISTAT_RNC_DRVN_OFFLINE BIT(_NIST_TRNG_EDU_ISTAT_RNC_DRVN_OFFLINE) ++#define NIST_TRNG_EDU_ISTAT_FIFO_URUN BIT(_NIST_TRNG_EDU_ISTAT_FIFO_URUN) ++#define NIST_TRNG_EDU_ISTAT_ACCESS_VIOL BIT(_NIST_TRNG_EDU_ISTAT_ACCESS_VIOL) ++#define NIST_TRNG_EDU_ISTAT_RESEED_REMINDER BIT(_NIST_TRNG_EDU_ISTAT_RESEED_REMINDER) ++ ++/* ALARMS */ ++#define NIST_TRNG_REG_ALARM_ILLEGAL_CMD_SEQ BIT(4) ++#define NIST_TRNG_REG_ALARM_FAILED_TEST_ID_OK 0 ++#define NIST_TRNG_REG_ALARM_FAILED_TEST_ID_KAT_STAT 1 ++#define NIST_TRNG_REG_ALARM_FAILED_TEST_ID_KAT 2 ++#define NIST_TRNG_REG_ALARM_FAILED_TEST_ID_MONOBIT 3 ++#define NIST_TRNG_REG_ALARM_FAILED_TEST_ID_RUN 4 ++#define NIST_TRNG_REG_ALARM_FAILED_TEST_ID_LONGRUN 5 ++#define NIST_TRNG_REG_ALARM_FAILED_TEST_ID_AUTOCORRELATION 6 ++#define NIST_TRNG_REG_ALARM_FAILED_TEST_ID_POKER 7 ++#define NIST_TRNG_REG_ALARM_FAILED_TEST_ID_REPETITION_COUNT 8 ++#define NIST_TRNG_REG_ALARM_FAILED_TEST_ID_ADAPATIVE_PROPORTION 9 ++ ++/* COREKIT_REL */ ++#define NIST_TRNG_REG_EXT_ENUM(x) (((x) >> 28) & 0xF) ++#define NIST_TRNG_REG_EXT_VER(x) (((x) >> 23) & 0xFF) ++#define NIST_TRNG_REG_REL_NUM(x) ((x) & 0xFFFF) ++ ++// This will be deleted ?? per comments in hw details. 
ie use CFG ++/* FEATURES */ ++#define NIST_TRNG_REG_FEATURES_AES_256(x) (((x) >> 9) & 1) ++#define NIST_TRNG_REG_FEATURES_EXTRA_PS_PRESENT(x) (((x) >> 8) & 1) ++#define NIST_TRNG_REG_FEATURES_DIAG_LEVEL_NS(x) (((x) >> 7) & 1) ++#define NIST_TRNG_REG_FEATURES_DIAG_LEVEL_BASIC_TRNG(x) (((x) >> 4) & 7) ++#define NIST_TRNG_REG_FEATURES_DIAG_LEVEL_ST_HLT(x) (((x) >> 1) & 7) ++#define NIST_TRNG_REG_FEATURES_SECURE_RST_STATE(x) ((x) & 1) ++ ++/* build_CFG0 */ ++#define NIST_TRNG_REG_CFG0_PERSONILIZATION_STR(x) (((x) >> 14) & 1) ++#define NIST_TRNG_REG_CFG0_AES_MAX_KEY_SIZE(x) (((x) >> 13) & 1) ++#define NIST_TRNG_REG_CFG0_AES_DATAPATH(x) (((x) >> 12) & 1) ++#define NIST_TRNG_REG_CFG0_EDU_PRESENT(x) (((x) >> 11) & 1) ++#define NIST_TRNG_REG_CFG0_BACGROUND_NOISE(x) (((x) >> 10) & 1) ++#define NIST_TRNG_REG_CFG0_CDC_SYNCH_DEPTH(x) (((x) >> 8) & 3) ++#define NIST_TRNG_REG_CFG0_BG8(x) (((x) >> 7) & 1) ++#define NIST_TRNG_REG_CFG0_CORE_TYPE(x) ((x) & 3) ++ ++/* build_CFG1 */ ++#define NIST_TRNG_REG_CFG1_ENT_SRC_REP_MIN_ENTROPY(x) (((x) >> 24) & 255) ++#define NIST_TRNG_REG_CFG1_ENT_SRC_REP_TEST(x) (((x) >> 23) & 1) ++#define NIST_TRNG_REG_CFG1_ENT_SRC_REP_SMPL_SIZE(x) (((x) >> 20) & 7) ++#define NIST_TRNG_REG_CFG1_RAW_HT_REP_TEST(x) (((x) >> 19) & 1) ++#define NIST_TRNG_REG_CFG1_RAW_HT_ADAP_TEST(x) (((x) >> 16) & 7) ++#define NIST_TRNG_REG_CFG1_POKER_TEST(x) (((x) >> 15) & 1) ++#define NIST_TRNG_REG_CFG1_RUN_TEST(x) (((x) >> 14) & 1) ++#define NIST_TRNG_REG_CFG1_MONO_BIT_TEST(x) (((x) >> 13) & 1) ++#define NIST_TRNG_REG_CFG1_AUTO_CORRELATION_TEST(x) (((x) >> 12) & 1) ++#define NIST_TRNG_REG_CFG1_STICKY_STARTUP(x) (((x) >> 8) & 1) ++#define NIST_TRNG_REG_CFG1_NUM_RAW_NOISE_BLKS(x) ((x) & 255) ++ ++/* EDU_BUILD_CFG0 */ ++#define NIST_TRNG_REG_EDU_CFG0_RBC2_RATE_WIDTH(x) (((x) >> 20) & 7) ++#define NIST_TRNG_REG_EDU_CFG0_RBC1_RATE_WIDTH(x) (((x) >> 16) & 7) ++#define NIST_TRNG_REG_EDU_CFG0_RBC0_RATE_WIDTH(x) (((x) >> 12) & 7) ++#define NIST_TRNG_REG_EDU_CFG0_PUBLIC_VTRNG_CHANNELS(x) (((x) >> 8) & 15) ++#define NIST_TRNG_REG_EDU_CFG0_ESM_CHANNEL(x) (((x) >> 6) & 1) ++#define NIST_TRNG_REG_EDU_CFG0_RBC_CHANNELS(x) (((x) >> 4) & 3) ++#define NIST_TRNG_REG_EDU_CFG0_FIFO_DEPTH(x) (((x) >> 2) & 7) ++ ++/* EDU_VSTAT */ ++#define NIST_TRNG_REG_EDU_VSTAT_BUSY(x) (((x) >> 31) & 1) ++#define NIST_TRNG_REG_EDU_VSTAT_RNC_ENABLED(x) (((x) >> 30) & 1) ++#define NIST_TRNG_REG_EDU_VSTAT_SEED_ENUM(x) (((x) >> 28) & 3) ++#define NIST_TRNG_REG_EDU_VSTAT_RWUE(x) (((x) >> 27) & 1) ++#define NIST_TRNG_REG_EDU_VSTAT_RWNE(x) (((x) >> 26) & 1) ++#define NIST_TRNG_REG_EDU_VSTAT_SRWE(x) (((x) >> 25) & 1) ++#define NIST_TRNG_REG_EDU_VSTAT_ANY_RW1(x) (((x) >> 24) & 1) ++#define NIST_TRNG_REG_EDU_VSTAT_BCKGRND_NOISE(x) (((x) >> 23) & 1) ++#define NIST_TRNG_REG_EDU_VSTAT_RNC_FIFO_EMPTY(x) (((x) >> 22) & 1) ++#define NIST_TRNG_REG_EDU_VSTAT_SLICE_RWI3(x) (((x) >> 15) & 1) ++#define NIST_TRNG_REG_EDU_VSTAT_SLICE_RWI2(x) (((x) >> 14) & 1) ++#define NIST_TRNG_REG_EDU_VSTAT_SLICE_RWI1(x) (((x) >> 13) & 1) ++#define NIST_TRNG_REG_EDU_VSTAT_SLICE_RWI0(x) (((x) >> 12) & 1) ++#define NIST_TRNG_REG_EDU_VSTAT_SLICE_VLD3(x) (((x) >> 11) & 1) ++#define NIST_TRNG_REG_EDU_VSTAT_SLICE_VLD2(x) (((x) >> 10) & 1) ++#define NIST_TRNG_REG_EDU_VSTAT_SLICE_VLD1(x) (((x) >> 9) & 1) ++#define NIST_TRNG_REG_EDU_VSTAT_SLICE_VLD0(x) (((x) >> 8) & 1) ++#define NIST_TRNG_REG_EDU_VSTAT_CURRENT_CMD(x) (((x) >> 4) & 15) ++#define NIST_TRNG_REG_EDU_VSTAT_LAST_CMD(x) ((x) & 15) ++ ++#define _NIST_TRNG_REG_SMODE_MAX_REJECTS_MASK 255UL ++#define 
_NIST_TRNG_REG_SMODE_SECURE_EN_MASK 1UL ++#define _NIST_TRNG_REG_SMODE_NONCE_MASK 1UL ++#define _NIST_TRNG_REG_MODE_SEC_ALG_MASK 1UL ++#define _NIST_TRNG_REG_MODE_ADDIN_PRESENT_MASK 1UL ++#define _NIST_TRNG_REG_MODE_PRED_RESIST_MASK 1UL ++#define _NIST_TRNG_REG_MODE_KAT_SEL_MASK 3UL ++#define _NIST_TRNG_REG_MODE_KAT_VEC_MASK 3UL ++#define _NIST_TRNG_REG_STAT_DRBG_STATE_MASK 3UL ++#define _NIST_TRNG_REG_STAT_SECURE_MASK 1UL ++#define _NIST_TRNG_REG_STAT_NONCE_MASK 1UL ++ ++#define NIST_TRNG_REG_SMODE_SET_MAX_REJECTS(y, x) (((y) & ~(_NIST_TRNG_REG_SMODE_MAX_REJECTS_MASK << _NIST_TRNG_REG_SMODE_MAX_REJECTS)) | ((x) << _NIST_TRNG_REG_SMODE_MAX_REJECTS)) ++#define NIST_TRNG_REG_SMODE_SET_SECURE_EN(y, x) (((y) & ~(_NIST_TRNG_REG_SMODE_SECURE_EN_MASK << _NIST_TRNG_REG_SMODE_SECURE_EN)) | ((x) << _NIST_TRNG_REG_SMODE_SECURE_EN)) ++#define NIST_TRNG_REG_SMODE_SET_NONCE(y, x) (((y) & ~(_NIST_TRNG_REG_SMODE_NONCE_MASK << _NIST_TRNG_REG_SMODE_NONCE)) | ((x) << _NIST_TRNG_REG_SMODE_NONCE)) ++#define NIST_TRNG_REG_SMODE_GET_MAX_REJECTS(x) (((x) >> _NIST_TRNG_REG_SMODE_MAX_REJECTS) & _NIST_TRNG_REG_SMODE_MAX_REJECTS_MASK) ++#define NIST_TRNG_REG_SMODE_GET_SECURE_EN(x) (((x) >> _NIST_TRNG_REG_SMODE_SECURE_EN) & _NIST_TRNG_REG_SMODE_SECURE_EN_MASK) ++#define NIST_TRNG_REG_SMODE_GET_NONCE(x) (((x) >> _NIST_TRNG_REG_SMODE_NONCE) & _NIST_TRNG_REG_SMODE_NONCE_MASK) ++ ++#define NIST_TRNG_REG_MODE_SET_SEC_ALG(y, x) (((y) & ~(_NIST_TRNG_REG_MODE_SEC_ALG_MASK << _NIST_TRNG_REG_MODE_SEC_ALG)) | ((x) << _NIST_TRNG_REG_MODE_SEC_ALG)) ++#define NIST_TRNG_REG_MODE_SET_PRED_RESIST(y, x) (((y) & ~(_NIST_TRNG_REG_MODE_PRED_RESIST_MASK << _NIST_TRNG_REG_MODE_PRED_RESIST)) | ((x) << _NIST_TRNG_REG_MODE_PRED_RESIST)) ++#define NIST_TRNG_REG_MODE_SET_ADDIN_PRESENT(y, x) (((y) & ~(_NIST_TRNG_REG_MODE_ADDIN_PRESENT_MASK << _NIST_TRNG_REG_MODE_ADDIN_PRESENT)) | ((x) << _NIST_TRNG_REG_MODE_ADDIN_PRESENT)) ++#define NIST_TRNG_REG_MODE_SET_KAT_SEL(y, x) (((y) & ~(_NIST_TRNG_REG_MODE_KAT_SEL_MASK << _NIST_TRNG_REG_MODE_KAT_SEL)) | ((x) << _NIST_TRNG_REG_MODE_KAT_SEL)) ++#define NIST_TRNG_REG_MODE_SET_KAT_VEC(y, x) (((y) & ~(_NIST_TRNG_REG_MODE_KAT_VEC_MASK << _NIST_TRNG_REG_MODE_KAT_VEC)) | ((x) << _NIST_TRNG_REG_MODE_KAT_VEC)) ++#define NIST_TRNG_REG_MODE_GET_SEC_ALG(x) (((x) >> _NIST_TRNG_REG_MODE_SEC_ALG) & _NIST_TRNG_REG_MODE_SEC_ALG_MASK) ++#define NIST_TRNG_REG_MODE_GET_PRED_RESIST(x) (((x) >> _NIST_TRNG_REG_MODE_PRED_RESIST) & _NIST_TRNG_REG_MODE_PRED_RESIST_MASK) ++#define NIST_TRNG_REG_MODE_GET_ADDIN_PRESENT(x) (((x) >> _NIST_TRNG_REG_MODE_ADDIN_PRESENT) & _NIST_TRNG_REG_MODE_ADDIN_PRESENT_MASK) ++#define NIST_TRNG_REG_STAT_GET_DRBG_STATE(x) (((x) >> _NIST_TRNG_REG_STAT_DRBG_STATE) & _NIST_TRNG_REG_STAT_DRBG_STATE_MASK) ++#define NIST_TRNG_REG_STAT_GET_SECURE(x) (((x) >> _NIST_TRNG_REG_STAT_SECURE) & _NIST_TRNG_REG_STAT_SECURE_MASK) ++#define NIST_TRNG_REG_STAT_GET_NONCE(x) (((x) >> _NIST_TRNG_REG_STAT_NONCE_MODE) & _NIST_TRNG_REG_STAT_NONCE_MASK) ++ ++#endif +diff --git a/drivers/char/hw_random/dwc/src/trng/include/nisttrng_private.h b/drivers/char/hw_random/dwc/src/trng/include/nisttrng_private.h +--- a/drivers/char/hw_random/dwc/src/trng/include/nisttrng_private.h 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/char/hw_random/dwc/src/trng/include/nisttrng_private.h 2025-12-23 10:16:19.524059486 +0000 +@@ -0,0 +1,89 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * This Synopsys software and associated documentation (hereinafter the ++ * "Software") is an unsupported proprietary work of Synopsys, Inc. 
unless ++ * otherwise expressly agreed to in writing between Synopsys and you. The ++ * Software IS NOT an item of Licensed Software or a Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Products ++ * with Synopsys or any supplement thereto. Synopsys is a registered trademark ++ * of Synopsys, Inc. Other names included in the SOFTWARE may be the ++ * trademarks of their respective owners. ++ * ++ * The contents of this file are dual-licensed; you may select either version ++ * 2 of the GNU General Public License ("GPL") or the BSD-3-Clause license ++ * ("BSD-3-Clause"). The GPL is included in the COPYING file accompanying the ++ * SOFTWARE. The BSD License is copied below. ++ * ++ * BSD-3-Clause License: ++ * Copyright (c) 2012-2016 Synopsys, Inc. and/or its affiliates. ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * ++ * 1. Redistributions of source code must retain the above copyright notice, ++ * this list of conditions, and the following disclaimer, without ++ * modification. ++ * ++ * 2. Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * ++ * 3. The names of the above-listed copyright holders may not be used to ++ * endorse or promote products derived from this software without specific ++ * prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. 
++ */ ++ ++#ifndef NISTTRNG_PRIVATE_H ++#define NISTTRNG_PRIVATE_H ++ ++#include "elppdu.h" ++#include "nisttrng_hw.h" ++#include "nisttrng_common.h" ++ ++int nisttrng_wait_on_busy(struct nist_trng_state *state); ++int nisttrng_wait_on_done(struct nist_trng_state *state); ++int nisttrng_wait_on_noise_rdy(struct nist_trng_state *state); ++int nisttrng_get_alarms(struct nist_trng_state *state); ++int nisttrng_reset_state(struct nist_trng_state *state); ++ ++/* ---------- Reminders ---------- */ ++int nisttrng_reset_counters(struct nist_trng_state *state); ++int nisttrng_set_reminder_max_bits_per_req(struct nist_trng_state *state, unsigned long max_bits_per_req); ++int nisttrng_set_reminder_max_req_per_seed(struct nist_trng_state *state, unsigned long long max_req_per_seed); ++int nisttrng_check_seed_lifetime(struct nist_trng_state *state); ++ ++/* ---------- Set field APIs ---------- */ ++int nisttrng_set_sec_strength(struct nist_trng_state *state, int req_sec_strength); ++int nisttrng_set_addin_present(struct nist_trng_state *state, int addin_present); ++int nisttrng_set_pred_resist(struct nist_trng_state *state, int pred_resist); ++int nisttrng_set_secure_mode(struct nist_trng_state *state, int secure_mode); ++int nisttrng_set_nonce_mode(struct nist_trng_state *state, int nonce_mode); ++ ++/* ---------- Load data APIs ---------- */ ++int nisttrng_load_ps_addin(struct nist_trng_state *state, void *input_str); ++ ++/* ---------- Command APIs ---------- */ ++int nisttrng_get_entropy_input(struct nist_trng_state *state, void *input_nonce, int nonce_operation); ++int nisttrng_refresh_addin(struct nist_trng_state *state, void *addin_str); ++int nisttrng_gen_random(struct nist_trng_state *state, void *random_bits, unsigned long req_num_bytes); ++int nisttrng_advance_state(struct nist_trng_state *state); ++int nisttrng_kat(struct nist_trng_state *state, int kat_sel, int kat_vec); ++int nisttrng_full_kat(struct nist_trng_state *state); ++int nisttrng_zeroize(struct nist_trng_state *state); ++ ++/* ---------- edu related ---------- */ ++ ++int nisttrng_rnc(struct nist_trng_state *state, int rnc_ctrl_cmd); ++int nisttrng_wait_fifo_full(struct nist_trng_state *state); ++#endif +diff --git a/drivers/char/hw_random/dwc/src/trng/include/synversion.h b/drivers/char/hw_random/dwc/src/trng/include/synversion.h +--- a/drivers/char/hw_random/dwc/src/trng/include/synversion.h 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/char/hw_random/dwc/src/trng/include/synversion.h 2025-12-23 10:16:19.524059486 +0000 +@@ -0,0 +1,52 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * This Synopsys software and associated documentation (hereinafter the ++ * "Software") is an unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. The ++ * Software IS NOT an item of Licensed Software or a Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Products ++ * with Synopsys or any supplement thereto. Synopsys is a registered trademark ++ * of Synopsys, Inc. Other names included in the SOFTWARE may be the ++ * trademarks of their respective owners. ++ * ++ * The contents of this file are dual-licensed; you may select either version ++ * 2 of the GNU General Public License ("GPL") or the BSD-3-Clause license ++ * ("BSD-3-Clause"). The GPL is included in the COPYING file accompanying the ++ * SOFTWARE. The BSD License is copied below. ++ * ++ * BSD-3-Clause License: ++ * Copyright (c) 2012-2016 Synopsys, Inc. 
and/or its affiliates. ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * ++ * 1. Redistributions of source code must retain the above copyright notice, ++ * this list of conditions, and the following disclaimer, without ++ * modification. ++ * ++ * 2. Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * ++ * 3. The names of the above-listed copyright holders may not be used to ++ * endorse or promote products derived from this software without specific ++ * prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#ifndef VERSION_H ++#define VERSION_H ++ ++#define TRNG_VERSION "1.00a" ++ ++#endif +diff --git a/drivers/char/hw_random/dwc/src/trng/kernel/nist_trng.c b/drivers/char/hw_random/dwc/src/trng/kernel/nist_trng.c +--- a/drivers/char/hw_random/dwc/src/trng/kernel/nist_trng.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/char/hw_random/dwc/src/trng/kernel/nist_trng.c 2025-12-23 10:16:19.524059486 +0000 +@@ -0,0 +1,2170 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * This Synopsys software and associated documentation (hereinafter the ++ * "Software") is an unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. The ++ * Software IS NOT an item of Licensed Software or a Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Products ++ * with Synopsys or any supplement thereto. Synopsys is a registered trademark ++ * of Synopsys, Inc. Other names included in the SOFTWARE may be the ++ * trademarks of their respective owners. ++ * ++ * The contents of this file are dual-licensed; you may select either version ++ * 2 of the GNU General Public License ("GPL") or the BSD-3-Clause license ++ * ("BSD-3-Clause"). The GPL is included in the COPYING file accompanying the ++ * SOFTWARE. The BSD License is copied below. ++ * ++ * BSD-3-Clause License: ++ * Copyright (c) 2012-2017 Synopsys, Inc. and/or its affiliates. ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * ++ * 1. Redistributions of source code must retain the above copyright notice, ++ * this list of conditions, and the following disclaimer, without ++ * modification. ++ * ++ * 2. 
Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * ++ * 3. The names of the above-listed copyright holders may not be used to ++ * endorse or promote products derived from this software without specific ++ * prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++ ++#include "nisttrng.h" ++ ++#define SYNOPSYS_HWRNG_DRIVER_NAME "hwrng-nist_trng" ++ ++#define num_gen_bytes 64 ++static unsigned long max_reads = 128; ++ ++struct synopsys_nisttrng_driver { ++ struct nist_trng_state nisttrng; ++ void *hwrng_drv; ++ void *crypto_drv; ++ unsigned char rand_out[num_gen_bytes]; ++}; ++ ++static unsigned int xxd_vtrng; ++ ++static void nisttrng_reinit(struct nist_trng_state *nist_trng) ++{ ++ int err; ++ ++ err = nisttrng_uninstantiate(nist_trng); ++ if (err && err != CRYPTO_NOT_INSTANTIATED) ++ goto ERR; ++ ++ err = nisttrng_instantiate(nist_trng, 128, 1, NULL); ++ if (err) ++ goto ERR; ++ ++ERR: ++ DEBUG("NIST_TRNG: Trying to reinitialize after a fatal alarm: %d\n", ++ err); ++} ++ ++static int nisttrng_platform_driver_read(struct platform_device *pdev, ++ void *buf, size_t max, bool wait) ++{ ++ struct synopsys_nisttrng_driver *data = 0; ++ int nisttrng_error = -1; ++ u32 *out = kmalloc(max, GFP_KERNEL); ++ unsigned int vtrng; ++ ++ if (!out) { ++ SYNHW_PRINT("memory not allocated\n"); ++ return -1; ++ } ++ ++ if (!pdev || !buf || !max) ++ return nisttrng_error; ++ ++ data = platform_get_drvdata(pdev); ++ if (data == 0) ++ return nisttrng_error; ++ ++ if (data->nisttrng.config.build_cfg0.edu_present) { ++ vtrng = xxd_vtrng % ((data->nisttrng.config.edu_build_cfg0 ++ .public_vtrng_channels) + ++ 1); ++ if (vtrng == 0) { ++ /* private vtrng */ ++ nisttrng_error = nisttrng_generate(&data->nisttrng, out, max, ++ data->nisttrng.status.sec_strength ? 256 : 128, ++ data->nisttrng.status.pred_resist, NULL); ++ } else { ++ /* public vtrng */ ++ nisttrng_error = nisttrng_generate_public_vtrng(&data->nisttrng, out, max, vtrng - 1); ++ } ++ xxd_vtrng++; ++ } else { ++ /* nist core vtrng */ ++ nisttrng_error = nisttrng_generate(&data->nisttrng, out, max, ++ data->nisttrng.status.sec_strength ? 
256 : 128, ++ data->nisttrng.status.pred_resist, NULL); ++ } ++ if (nisttrng_error < 0) { ++ if (data->nisttrng.status.alarm_code) ++ nisttrng_reinit(&data->nisttrng); ++ ++ return nisttrng_error; ++ } ++ ++ memcpy(buf, out, max); ++ kfree(out); ++ ++ return max; ++} ++ ++static int nisttrng_hwrng_driver_read(struct hwrng *rng, void *buf, size_t max, ++ bool wait) ++{ ++ struct platform_device *pdev = 0; ++ ++ if (rng == 0) ++ return -1; ++ ++ pdev = (struct platform_device *)rng->priv; ++ return nisttrng_platform_driver_read(pdev, buf, max, wait); ++} ++ ++static ssize_t ckr_show(struct device *dev, struct device_attribute *devattr, ++ char *buf) ++{ ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ ++ return sprintf(buf, "rel_num=%u, ext_ver=%u, ext_enum=%u\n", ++ priv->nisttrng.config.corekit_rel.rel_num, ++ priv->nisttrng.config.corekit_rel.ext_ver, ++ priv->nisttrng.config.corekit_rel.ext_enum); ++} ++ ++static ssize_t features_show(struct device *dev, ++ struct device_attribute *devattr, char *buf) ++{ ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ ++ return sprintf(buf, ++ "drbg_arch = %u, diag_basic_trng=%u, diag_st_hlt=%u, diag_ns=%u, secure_rst_state=%u, extra_ps_present=%u\n", ++ priv->nisttrng.config.features.drbg_arch, ++ priv->nisttrng.config.features.diag_level_basic_trng, ++ priv->nisttrng.config.features.diag_level_stat_hlt, ++ priv->nisttrng.config.features.diag_level_ns, ++ priv->nisttrng.config.features.secure_rst_state, ++ priv->nisttrng.config.features.extra_ps_present); ++} ++ ++static ssize_t secure_show(struct device *dev, struct device_attribute *devattr, ++ char *buf) ++{ ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ ++ return sprintf(buf, "%s\n", NIST_TRNG_REG_SMODE_GET_SECURE_EN(pdu_io_read32(priv->nisttrng.base + ++ NIST_TRNG_REG_SMODE)) ? "on" : "off"); ++} ++ ++static ssize_t secure_store(struct device *dev, ++ struct device_attribute *devattr, const char *buf, ++ size_t count) ++{ ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ int ret; ++ ++ ret = nisttrng_set_secure_mode(&priv->nisttrng, ++ sysfs_streq(buf, "on") ? 1 : 0); ++ if (ret) ++ return -1; ++ ++ return count; ++} ++ ++static ssize_t nonce_show(struct device *dev, struct device_attribute *devattr, ++ char *buf) ++{ ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ ++ return sprintf(buf, "%s\n", NIST_TRNG_REG_SMODE_GET_NONCE(pdu_io_read32(priv->nisttrng.base + ++ NIST_TRNG_REG_SMODE)) ? "on" : "off"); ++} ++ ++static ssize_t nonce_store(struct device *dev, struct device_attribute *devattr, ++ const char *buf, size_t count) ++{ ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ int ret; ++ ++ ret = nisttrng_set_nonce_mode(&priv->nisttrng, ++ sysfs_streq(buf, "on") ? 1 : 0); ++ if (ret) ++ return -1; ++ ++ return count; ++} ++ ++static ssize_t sec_strength_show(struct device *dev, ++ struct device_attribute *devattr, char *buf) ++{ ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ ++ return sprintf(buf, "%s\n", ++ priv->nisttrng.status.sec_strength ? 
"256" : "128"); ++} ++ ++static ssize_t sec_strength_store(struct device *dev, ++ struct device_attribute *devattr, ++ const char *buf, size_t count) ++{ ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ char foo[9]; ++ int tmp; ++ int ret; ++ ++ if (count > 8) ++ return -1; ++ ++ foo[8] = 0; ++ memcpy(foo, buf, 8); ++ ret = kstrtoint(foo, 10, &tmp); ++ if (ret) ++ return ret; ++ ++ ret = nisttrng_set_sec_strength(&priv->nisttrng, tmp); ++ if (ret) ++ return -1; ++ ++ return count; ++} ++ ++static ssize_t rand_reg_show(struct device *dev, ++ struct device_attribute *devattr, char *buf) ++{ ++ unsigned int x; ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ ++ for (x = 0; x < 4; x++) { ++ sprintf(buf + 8 * x, "%08lx", ++ pdu_io_read32(priv->nisttrng.base + ++ NIST_TRNG_REG_RAND0 + 3 - x)); ++ } ++ ++ strcat(buf, "\n"); ++ return strlen(buf); ++} ++ ++static ssize_t seed_reg_show(struct device *dev, ++ struct device_attribute *devattr, char *buf) ++{ ++ unsigned int x; ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ ++ for (x = 0; x < 12; x++) { ++ sprintf(buf + 8 * x, "%08lx", ++ pdu_io_read32(priv->nisttrng.base + ++ NIST_TRNG_REG_SEED0 + 11 - x)); ++ } ++ strcat(buf, "\n"); ++ return strlen(buf); ++} ++ ++static ssize_t seed_reg_store(struct device *dev, ++ struct device_attribute *devattr, const char *buf, ++ size_t count) ++{ ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ char foo[9]; ++ unsigned int x, tmp; ++ int ret; ++ ++ // string must be at least 12 32-bit words long in 0 padded hex ++ if (count < (2 * 12 * 4)) ++ return -1; ++ ++ foo[8] = 0; ++ for (x = 0; x < 12; x++) { ++ memcpy(foo, buf + x * 8, 8); ++ ret = kstrtouint(foo, 16, &tmp); ++ if (ret) ++ return ret; ++ ++ pdu_io_write32(priv->nisttrng.base + NIST_TRNG_REG_SEED0 + x, ++ tmp); ++ } ++ ++ return count; ++} ++ ++static ssize_t npa_data_reg_show(struct device *dev, ++ struct device_attribute *devattr, char *buf) ++{ ++ unsigned int x; ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ ++ for (x = 0; x < 16; x++) { ++ sprintf(buf + 8 * x, "%08lx", ++ pdu_io_read32(priv->nisttrng.base + ++ NIST_TRNG_REG_NPA_DATA0 + 15 - x)); ++ } ++ ++ strcat(buf, "\n"); ++ return strlen(buf); ++} ++ ++static ssize_t npa_data_reg_store(struct device *dev, ++ struct device_attribute *devattr, ++ const char *buf, size_t count) ++{ ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ char foo[9]; ++ unsigned int x, tmp; ++ int ret; ++ ++ // string must be at least 16 32-bit words long in 0 padded hex ++ if (count < (2 * 16 * 4)) ++ return -1; ++ ++ foo[8] = 0; ++ for (x = 0; x < 16; x++) { ++ memcpy(foo, buf + x * 8, 8); ++ ret = kstrtouint(foo, 16, &tmp); ++ if (ret) ++ return ret; ++ ++ pdu_io_write32(priv->nisttrng.base + NIST_TRNG_REG_NPA_DATA0 + x, tmp); ++ } ++ ++ return count; ++} ++ ++static ssize_t ctrl_reg_show(struct device *dev, ++ struct device_attribute *devattr, char *buf) ++{ ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ ++ return sprintf(buf, "%08lx\n", ++ pdu_io_read32(priv->nisttrng.base + NIST_TRNG_REG_CTRL)); ++} ++ ++static ssize_t ctrl_reg_store(struct device *dev, ++ struct device_attribute *devattr, const char *buf, ++ size_t count) ++{ ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ char foo[9]; ++ unsigned int tmp; ++ int ret; ++ ++ // string must be at least a 32-bit word in 0 padded hex ++ if (count < 8) ++ return -1; ++ ++ foo[8] = 0; ++ memcpy(foo, buf, 8); ++ 
ret = kstrtouint(foo, 16, &tmp); ++ if (ret) ++ return ret; ++ ++ pdu_io_write32(priv->nisttrng.base + NIST_TRNG_REG_CTRL, tmp); ++ return count; ++} ++ ++static ssize_t istat_reg_show(struct device *dev, ++ struct device_attribute *devattr, char *buf) ++{ ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ ++ return sprintf(buf, "%08lx\n", ++ pdu_io_read32(priv->nisttrng.base + NIST_TRNG_REG_ISTAT)); ++} ++ ++static ssize_t istat_reg_store(struct device *dev, ++ struct device_attribute *devattr, ++ const char *buf, size_t count) ++{ ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ char foo[9]; ++ unsigned int tmp; ++ int ret; ++ ++ // string must be at least a 32-bit word in 0 padded hex ++ if (count < 8) ++ return -1; ++ ++ foo[8] = 0; ++ memcpy(foo, buf, 8); ++ ret = kstrtouint(foo, 16, &tmp); ++ if (ret) ++ return ret; ++ ++ pdu_io_write32(priv->nisttrng.base + NIST_TRNG_REG_ISTAT, tmp); ++ return count; ++} ++ ++static ssize_t mode_reg_show(struct device *dev, ++ struct device_attribute *devattr, char *buf) ++{ ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ ++ return sprintf(buf, "%08lx\n", ++ pdu_io_read32(priv->nisttrng.base + NIST_TRNG_REG_MODE)); ++} ++ ++static ssize_t mode_reg_store(struct device *dev, ++ struct device_attribute *devattr, const char *buf, ++ size_t count) ++{ ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ char foo[9]; ++ unsigned int tmp; ++ int ret; ++ ++ // string must be at least a 32-bit word in 0 padded hex ++ if (count < 8) ++ return -1; ++ ++ foo[8] = 0; ++ memcpy(foo, buf, 8); ++ ret = kstrtouint(foo, 16, &tmp); ++ if (ret) ++ return ret; ++ ++ pdu_io_write32(priv->nisttrng.base + NIST_TRNG_REG_MODE, tmp); ++ ++ return count; ++} ++ ++static ssize_t smode_reg_show(struct device *dev, ++ struct device_attribute *devattr, char *buf) ++{ ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ ++ return sprintf(buf, "%08lx\n", ++ pdu_io_read32(priv->nisttrng.base + NIST_TRNG_REG_SMODE)); ++} ++ ++static ssize_t smode_reg_store(struct device *dev, ++ struct device_attribute *devattr, ++ const char *buf, size_t count) ++{ ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ char foo[9]; ++ unsigned int tmp; ++ int ret; ++ ++ // string must be at least a 32-bit word in 0 padded hex ++ if (count < 8) ++ return -1; ++ ++ foo[8] = 0; ++ memcpy(foo, buf, 8); ++ ret = kstrtouint(foo, 16, &tmp); ++ if (ret) ++ return ret; ++ ++ pdu_io_write32(priv->nisttrng.base + NIST_TRNG_REG_SMODE, tmp); ++ return count; ++} ++ ++static ssize_t alarm_reg_show(struct device *dev, ++ struct device_attribute *devattr, char *buf) ++{ ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ ++ return sprintf(buf, "%08lx\n", ++ pdu_io_read32(priv->nisttrng.base + NIST_TRNG_REG_ALARM)); ++} ++ ++static ssize_t alarm_reg_store(struct device *dev, ++ struct device_attribute *devattr, ++ const char *buf, size_t count) ++{ ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ char foo[9]; ++ unsigned int tmp; ++ int ret; ++ ++ // string must be at least a 32-bit word in 0 padded hex ++ if (count < 8) ++ return -1; ++ ++ foo[8] = 0; ++ memcpy(foo, buf, 8); ++ ret = kstrtouint(foo, 16, &tmp); ++ if (ret) ++ return ret; ++ ++ pdu_io_write32(priv->nisttrng.base + NIST_TRNG_REG_ALARM, tmp); ++ return count; ++} ++ ++static ssize_t stat_reg_show(struct device *dev, ++ struct device_attribute *devattr, char *buf) ++{ ++ struct synopsys_nisttrng_driver *priv = 
dev_get_drvdata(dev); ++ ++ return sprintf(buf, "%08lx\n", ++ pdu_io_read32(priv->nisttrng.base + NIST_TRNG_REG_STAT)); ++} ++ ++static ssize_t ia_wdata_reg_store(struct device *dev, ++ struct device_attribute *devattr, ++ const char *buf, size_t count) ++{ ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ char foo[9]; ++ unsigned int tmp; ++ int ret; ++ ++ // string must be at least a 32-bit word in 0 padded hex ++ if (count < 8) ++ return -1; ++ ++ foo[8] = 0; ++ memcpy(foo, buf, 8); ++ ret = kstrtouint(foo, 16, &tmp); ++ if (ret) ++ return ret; ++ ++ pdu_io_write32(priv->nisttrng.base + NIST_TRNG_REG_IA_WDATA, tmp); ++ return count; ++} ++ ++static ssize_t ia_wdata_reg_show(struct device *dev, ++ struct device_attribute *devattr, char *buf) ++{ ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ ++ return sprintf(buf, "%08lx\n", ++ pdu_io_read32(priv->nisttrng.base + NIST_TRNG_REG_IA_WDATA)); ++} ++ ++static ssize_t ia_rdata_reg_show(struct device *dev, ++ struct device_attribute *devattr, char *buf) ++{ ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ ++ return sprintf(buf, "%08lx\n", ++ pdu_io_read32(priv->nisttrng.base + NIST_TRNG_REG_IA_RDATA)); ++} ++ ++static ssize_t ia_addr_reg_store(struct device *dev, ++ struct device_attribute *devattr, ++ const char *buf, size_t count) ++{ ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ char foo[9]; ++ unsigned int tmp; ++ int ret; ++ ++ // string must be at least a 32-bit word in 0 padded hex ++ if (count < 8) ++ return -1; ++ ++ foo[8] = 0; ++ memcpy(foo, buf, 8); ++ ret = kstrtouint(foo, 16, &tmp); ++ if (ret) ++ return ret; ++ ++ pdu_io_write32(priv->nisttrng.base + NIST_TRNG_REG_IA_ADDR, tmp); ++ return count; ++} ++ ++static ssize_t ia_addr_reg_show(struct device *dev, ++ struct device_attribute *devattr, char *buf) ++{ ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ ++ return sprintf(buf, "%08lx\n", ++ pdu_io_read32(priv->nisttrng.base + NIST_TRNG_REG_IA_ADDR)); ++} ++ ++static ssize_t ia_cmd_reg_store(struct device *dev, ++ struct device_attribute *devattr, ++ const char *buf, size_t count) ++{ ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ char foo[9]; ++ unsigned int tmp; ++ int ret; ++ ++ // string must be at least a 32-bit word in 0 padded hex ++ if (count < 8) ++ return -1; ++ ++ foo[8] = 0; ++ memcpy(foo, buf, 8); ++ ret = kstrtouint(foo, 16, &tmp); ++ if (ret) ++ return ret; ++ ++ pdu_io_write32(priv->nisttrng.base + NIST_TRNG_REG_IA_CMD, tmp); ++ return count; ++} ++ ++static ssize_t ia_cmd_reg_show(struct device *dev, ++ struct device_attribute *devattr, char *buf) ++{ ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ ++ return sprintf(buf, "%08lx\n", ++ pdu_io_read32(priv->nisttrng.base + NIST_TRNG_REG_IA_CMD)); ++} ++ ++static ssize_t rnc_reg_show(struct device *dev, ++ struct device_attribute *devattr, char *buf) ++{ ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ ++ return sprintf(buf, "%08lx\n", ++ pdu_io_read32(priv->nisttrng.base + NIST_TRNG_EDU_RNC_CTRL)); ++} ++ ++static ssize_t rnc_reg_store(struct device *dev, ++ struct device_attribute *devattr, const char *buf, ++ size_t count) ++{ ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ char foo[9]; ++ unsigned int tmp; ++ int ret; ++ ++ // string must be at least a 32-bit word in 0 padded hex ++ if (count < 8) ++ return -1; ++ ++ foo[8] = 0; ++ memcpy(foo, buf, 8); ++ ret = kstrtouint(foo, 16, &tmp); 
++ if (ret) ++ return ret; ++ ++ pdu_io_write32(priv->nisttrng.base + NIST_TRNG_EDU_RNC_CTRL, tmp); ++ ++ return count; ++} ++ ++static ssize_t rbc_reg_show(struct device *dev, ++ struct device_attribute *devattr, char *buf) ++{ ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ ++ return sprintf(buf, "%08lx\n", ++ pdu_io_read32(priv->nisttrng.base + NIST_TRNG_EDU_RBC_CTRL)); ++} ++ ++static ssize_t rbc_reg_store(struct device *dev, ++ struct device_attribute *devattr, const char *buf, ++ size_t count) ++{ ++ char opts_str[5]; ++ unsigned int opts_int; ++ int enable, rbc_num, rate, urun_blnk, ret; ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ ++ opts_str[4] = 0; ++ memcpy(opts_str, buf, 4); ++ ret = kstrtouint(opts_str, 16, &opts_int); ++ if (ret) ++ return ret; ++ ++ SYNHW_PRINT("%s %x\n", __func__, opts_int); ++ ++ enable = (opts_int >> 12 & 0xf); ++ if (enable > 1) { ++ SYNHW_PRINT("incorrect enable %x\n", enable); ++ return -1; ++ } ++ ++ rbc_num = (opts_int >> 8 & 0xf); ++ if (rbc_num > priv->nisttrng.config.edu_build_cfg0.rbc_channels - 1) { ++ SYNHW_PRINT("incorrect rbc_num %x\n", rbc_num); ++ return -1; ++ } ++ ++ rate = (opts_int >> 4 & 0xf); ++ if (rate > 8) { ++ SYNHW_PRINT("incorrect rate %x\n", rate); ++ return -1; ++ } ++ ++ urun_blnk = (opts_int & 0xf); ++ if (urun_blnk > 3) { ++ SYNHW_PRINT("incorrect urun_blnk %x\n", urun_blnk); ++ return -1; ++ } ++ ++ SYNHW_PRINT("enable %x rbc_num %x rate %x urun_blnk %x\n", enable, ++ rbc_num, rate, urun_blnk); ++ ++ ret = nisttrng_rbc(&priv->nisttrng, enable, rbc_num, rate, ++ urun_blnk); ++ if (ret) ++ return -1; ++ ++ return count; ++} ++ ++static ssize_t hw_state_show(struct device *dev, ++ struct device_attribute *devattr, char *buf) ++{ ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ u32 addr; ++ int i; ++ int tot_char; ++ ++ addr = 0x20; ++ tot_char = sprintf(buf, "Key = "); ++ for (i = 0; i < 8; i++) { ++ pdu_io_write32(priv->nisttrng.base + NIST_TRNG_REG_IA_ADDR, ++ addr + 7 - i); ++ pdu_io_write32(priv->nisttrng.base + NIST_TRNG_REG_IA_CMD, ++ 0x80000000); ++ tot_char += sprintf(buf + tot_char, "%08lx", ++ pdu_io_read32(priv->nisttrng.base + ++ NIST_TRNG_REG_IA_RDATA)); ++ } ++ tot_char += sprintf(buf + tot_char, "\n"); ++ ++ addr = 0x28; ++ tot_char += sprintf(buf + tot_char, "V = "); ++ for (i = 0; i < 4; i++) { ++ pdu_io_write32(priv->nisttrng.base + NIST_TRNG_REG_IA_ADDR, ++ addr + 3 - i); ++ pdu_io_write32(priv->nisttrng.base + NIST_TRNG_REG_IA_CMD, ++ 0x80000000); ++ tot_char += sprintf(buf + tot_char, "%08lx", ++ pdu_io_read32(priv->nisttrng.base + ++ NIST_TRNG_REG_IA_RDATA)); ++ } ++ ++ tot_char += sprintf(buf + tot_char, "\n"); ++ ++ return tot_char; ++} ++ ++static ssize_t max_bits_per_req_store(struct device *dev, ++ struct device_attribute *devattr, ++ const char *buf, size_t count) ++{ ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ char foo[9]; ++ unsigned long tmp; ++ int ret; ++ ++ // string must be at least a 32-bit word in 0 padded hex ++ if (count < 8) ++ return -1; ++ ++ foo[8] = 0; ++ memcpy(foo, buf, 8); ++ ret = kstrtoul(foo, 16, &tmp); ++ if (ret) ++ return ret; ++ ++ ret = nisttrng_set_reminder_max_bits_per_req(&priv->nisttrng, ++ tmp); ++ if (ret) ++ return -1; ++ ++ return count; ++} ++ ++static ssize_t max_bits_per_req_show(struct device *dev, ++ struct device_attribute *devattr, ++ char *buf) ++{ ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ ++ return sprintf(buf, "%08lx\n", ++ 
priv->nisttrng.counters.max_bits_per_req); ++} ++ ++static ssize_t max_req_per_seed_store(struct device *dev, ++ struct device_attribute *devattr, ++ const char *buf, size_t count) ++{ ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ char foo[17]; ++ unsigned long long tmp; ++ int ret; ++ ++ // string must be at least a 64-bit word in 0 padded hex ++ if (count < 16) ++ return -1; ++ ++ foo[16] = 0; ++ memcpy(foo, buf, 16); ++ ret = kstrtoull(foo, 16, &tmp); ++ if (ret) ++ return ret; ++ ++ ret = nisttrng_set_reminder_max_req_per_seed(&priv->nisttrng, ++ tmp); ++ if (ret) ++ return -1; ++ ++ return count; ++} ++ ++static ssize_t max_req_per_seed_show(struct device *dev, ++ struct device_attribute *devattr, ++ char *buf) ++{ ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ ++ return sprintf(buf, "%08llx\n", ++ priv->nisttrng.counters.max_req_per_seed); ++} ++ ++static ssize_t collect_ent_show(struct device *dev, ++ struct device_attribute *devattr, char *buf) ++{ ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ int rep; ++ int i, j; ++ int ret; ++ u32 tmp; ++ int t; ++ ++ t = NIST_TRNG_RETRY_MAX; ++ ++ // Change to TEST mode ++ DEBUG("Change to TEST mode\n"); ++ pdu_io_write32(priv->nisttrng.base + NIST_TRNG_REG_SMODE, 0x00000028); ++ // Turn on the noise collect mode ++ DEBUG("Turn on the noise collect mode\n"); ++ pdu_io_write32(priv->nisttrng.base + NIST_TRNG_REG_SMODE, 0x80000028); ++ ++ // issue generate entropy command ++ DEBUG("Issue a GEN_NOISE command\n"); ++ pdu_io_write32(priv->nisttrng.base + NIST_TRNG_REG_CTRL, ++ NIST_TRNG_REG_CTRL_CMD_GEN_NOISE); ++ ++ // read raw noise ++ // 2 reads if sec_strength is 128 and 3 reads if it is 256 ++ if (priv->nisttrng.status.sec_strength == SEC_STRNT_AES128) ++ rep = 2; ++ else if (priv->nisttrng.status.sec_strength == SEC_STRNT_AES256) ++ rep = 3; ++ ++ for (i = 0; i < rep; i++) { ++ t = NIST_TRNG_RETRY_MAX; ++ tmp = 0; ++ DEBUG("Wait for NOISE_RDY interrupt.\n"); ++ do { ++ tmp = pdu_io_read32(priv->nisttrng.base + ++ NIST_TRNG_REG_ISTAT); ++ } while (!(tmp & (NIST_TRNG_REG_ISTAT_NOISE_RDY | ++ NIST_TRNG_REG_ISTAT_ALARMS)) && ++ --t); ++ ++ DEBUG("Read NPA_DATAx\n"); ++ for (j = 0; j < 16; j++) { ++ sprintf(buf + 128 * i + 8 * j, "%08lx", ++ pdu_io_read32(priv->nisttrng.base + ++ NIST_TRNG_REG_NPA_DATA0 + j)); ++ } ++ ++ // clear NOISE_RDY IRQ ++ DEBUG("Clear NOISE_RDY interrupt.\n"); ++ ret = nisttrng_wait_on_noise_rdy(&priv->nisttrng); ++ if (ret) ++ return -1; ++ } ++ ++ DEBUG("Wait for DONE\n"); ++ ret = nisttrng_wait_on_done(&priv->nisttrng); ++ if (ret) ++ return -1; ++ ++ strcat(buf, "\n"); ++ return strlen(buf); ++} ++ ++static ssize_t collect_ent_nsout_show(struct device *dev, ++ struct device_attribute *devattr, ++ char *buf) ++{ ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ int rep; ++ int i; ++ int ret; ++ ++ // generate entropy ++ ret = nisttrng_get_entropy_input(&priv->nisttrng, NULL, 0); ++ if (ret) ++ return -1; ++ ++ // read NS_OUTPUTx ++ // 32 reads if sec_strength is 128 and 48 reads if it is 256 ++ if (priv->nisttrng.status.sec_strength == SEC_STRNT_AES128) ++ rep = 32; ++ else if (priv->nisttrng.status.sec_strength == SEC_STRNT_AES256) ++ rep = 48; ++ ++ for (i = 0; i < rep; i++) { ++ pdu_io_write32(priv->nisttrng.base + NIST_TRNG_REG_IA_ADDR, ++ 0x70 + rep - 1 - i); ++ pdu_io_write32(priv->nisttrng.base + NIST_TRNG_REG_IA_CMD, ++ 0x80000000); ++ sprintf(buf + 8 * i, "%08lx", ++ pdu_io_read32(priv->nisttrng.base + ++ NIST_TRNG_REG_IA_RDATA)); 
++ } ++ ++ strcat(buf, "\n"); ++ return strlen(buf); ++} ++ ++static ssize_t nonce_seed_with_df_store(struct device *dev, ++ struct device_attribute *devattr, ++ const char *buf, size_t count) ++{ ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ char foo[9]; ++ u32 seed[48] = { 0 }; ++ int rep; ++ int i; ++ int ret; ++ ++ if (priv->nisttrng.status.sec_strength == SEC_STRNT_AES128) ++ rep = 2; ++ else if (priv->nisttrng.status.sec_strength == SEC_STRNT_AES256) ++ rep = 3; ++ ++ DEBUG("Number of char in input = %zu\n", count); ++ if (count != (rep * 128)) ++ return -1; ++ ++ foo[8] = 0; ++ for (i = 0; i < (rep * 16); i++) { ++ memcpy(foo, buf + i * 8, 8); ++ ret = kstrtouint(foo, 16, (seed + (rep * 16 - 1) - i)); ++ if (ret) ++ return ret; ++ } ++ ++ ret = nisttrng_get_entropy_input(&priv->nisttrng, seed, 1); ++ if (ret) ++ return -1; ++ ++ return count; ++} ++ ++static ssize_t nonce_seed_direct_store(struct device *dev, ++ struct device_attribute *devattr, ++ const char *buf, size_t count) ++{ ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ char foo[9]; ++ u32 seed[12] = { 0 }; ++ int rep; ++ int i; ++ int ret; ++ ++ if (priv->nisttrng.status.sec_strength == SEC_STRNT_AES128) ++ rep = 2; ++ else if (priv->nisttrng.status.sec_strength == SEC_STRNT_AES256) ++ rep = 3; ++ ++ DEBUG("Number of char in input = %zu\n", count); ++ if (count != (rep * 32)) ++ return -1; ++ ++ foo[8] = 0; ++ for (i = 0; i < (rep * 4); i++) { ++ memcpy(foo, buf + i * 8, 8); ++ ret = kstrtouint(foo, 16, (seed + (rep * 4 - 1) - i)); ++ if (ret) ++ return ret; ++ } ++ ++ ret = nisttrng_get_entropy_input(&priv->nisttrng, seed, 0); ++ if (ret) ++ return -1; ++ ++ return count; ++} ++ ++static ssize_t instantiate_store(struct device *dev, ++ struct device_attribute *devattr, ++ const char *buf, size_t count) ++{ ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ char opts_str[101]; ++ unsigned int opts_int; ++ int req_sec_strength = 256; ++ int pred_resist = 1; ++ bool ps_exists = 0; ++ u32 ps[12]; ++ unsigned int ps_length; ++ int i; ++ int ret; ++ ++ /* First 3 digits: ++ * they have to be 0 or 1 ++ * 2-1-0 --> 2: predictoin resistance, 1: security strength, 0: personilizatoin string existence ++ */ ++ opts_str[3] = 0; ++ memcpy(opts_str, buf, 3); ++ ret = kstrtouint(opts_str, 2, &opts_int); ++ if (ret) ++ return ret; ++ ++ if (((opts_str[0] != '0') && (opts_str[0] != '1')) || ++ ((opts_str[1] != '0') && (opts_str[1] != '1')) || ++ ((opts_str[2] != '0') && (opts_str[2] != '1'))) { ++ SYNHW_PRINT("Invalid input options: First 3 digits can only be 1 or 0\n"); ++ return -1; ++ } ++ ++ if (opts_int & 1) ++ ps_exists = 1; ++ else ++ ps_exists = 0; ++ ++ if (opts_int & 2) ++ req_sec_strength = 256; ++ else ++ req_sec_strength = 128; ++ ++ if (opts_int & 4) ++ pred_resist = 1; ++ else ++ pred_resist = 0; ++ ++ /* check input option length */ ++ if (!ps_exists) { ++ if (count != 3) { ++ SYNHW_PRINT("Invalid input options: If personilization string does not exist, options has to be 3 char.\n"); ++ return -1; ++ } ++ } else { ++ if (req_sec_strength == 128) { ++ if (count != 64 + 4) { // +4 for options and "-" ++ SYNHW_PRINT("Invalid input options: If personilization string exists and security strength is 128-bit, options has to be 68 char (not %zu char).\n", ++ count); ++ return -1; ++ } ++ } else if (req_sec_strength == 256) { ++ if (count != ++ 96 + 4) { // +4 for options and "-", +1 because of the termination char that count includes ++ SYNHW_PRINT("Invalid input options: 
If personilization string exists and security strength is 256-bit, options has to be 100 char (not %zu char).\n", ++ count); ++ return -1; ++ } ++ } else { ++ SYNHW_PRINT("Invalid input options\n"); ++ return -1; ++ } ++ } ++ ++ /* Personilization string */ ++ for (i = 0; i < 12; i++) ++ ps[i] = 0; ++ ++ if (req_sec_strength == 128) ++ ps_length = 64; ++ else if (req_sec_strength == 256) ++ ps_length = 96; ++ else ++ SYNHW_PRINT("Invalid security strength\n"); ++ ++ if (ps_exists) { ++ opts_str[1] = 0; ++ memcpy(opts_str, buf + 3, 1); ++ ++ if (opts_str[0] == '-') { ++ opts_str[8] = 0; ++ for (i = 0; i < ps_length / 8; i++) { ++ memcpy(opts_str, buf + 4 + i * 8, 8); ++ ret = kstrtouint(opts_str, 16, ++ ps + (ps_length / 8 - 1) - i); ++ if (ret) ++ return ret; ++ } ++ } else { ++ SYNHW_PRINT("4th character of input has to be \"-\" when personilization string exists\n"); ++ } ++ ++ ret = nisttrng_instantiate(&priv->nisttrng, ++ req_sec_strength, pred_resist, ++ ps); ++ if (ret) ++ return -1; ++ ++ } else { ++ ret = nisttrng_instantiate(&priv->nisttrng, ++ req_sec_strength, pred_resist, ++ NULL); ++ if (ret) ++ return -1; ++ } ++ ++ return count; ++} ++ ++static ssize_t uninstantiate_store(struct device *dev, ++ struct device_attribute *devattr, ++ const char *buf, size_t count) ++{ ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ ++ nisttrng_uninstantiate(&priv->nisttrng); ++ ++ return count; ++} ++ ++static ssize_t reseed_store(struct device *dev, struct device_attribute *devattr, ++ const char *buf, size_t count) ++{ ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ char opts_str[100]; ++ unsigned int opts_int; ++ int pred_resist = 1; ++ bool addin_exists = 0; ++ u32 addin[12]; ++ unsigned int addin_length; ++ int i; ++ int ret; ++ ++ /* First 2 digits: ++ * they have to be 0 or 1 ++ * 1-0 --> 1: predictoin resistance, 0: additional input string existence ++ */ ++ opts_str[2] = 0; ++ memcpy(opts_str, buf, 2); ++ ret = kstrtouint(opts_str, 2, &opts_int); ++ if (ret) ++ return ret; ++ ++ if (((opts_str[0] != '0') && (opts_str[0] != '1')) || ++ ((opts_str[1] != '0') && (opts_str[1] != '1'))) { ++ SYNHW_PRINT("Invalid input options: First 2 digits can only be 1 or 0\n"); ++ return -1; ++ } ++ ++ if (opts_int & 1) ++ addin_exists = 1; ++ else ++ addin_exists = 0; ++ ++ if (opts_int & 2) ++ pred_resist = 1; ++ else ++ pred_resist = 0; ++ ++ /* check input option length */ ++ if (!addin_exists) { ++ if (count != 2) { ++ SYNHW_PRINT("Invalid input options: If additional input does not exist, options has to be 2 char.\n"); ++ return -1; ++ } ++ } else { ++ if (priv->nisttrng.status.sec_strength == SEC_STRNT_AES128) { ++ if (count != 64 + 3) { // +3 for options and "-" ++ SYNHW_PRINT("Invalid input options: If additional input exists and security strength is 128-bit, options has to be 67 char.\n"); ++ return -1; ++ } ++ } else if (priv->nisttrng.status.sec_strength == ++ SEC_STRNT_AES256) { ++ if (count != 96 + 3) { // +3 for options and "-" ++ SYNHW_PRINT("Invalid input options: If additional input exists and security strength is 256-bit, options has to be 99 char.\n"); ++ return -1; ++ } ++ } else { ++ SYNHW_PRINT("Invalid input options\n"); ++ return -1; ++ } ++ } ++ ++ /* Additional input */ ++ for (i = 0; i < 12; i++) ++ addin[i] = 0; ++ ++ if (priv->nisttrng.status.sec_strength == SEC_STRNT_AES128) ++ addin_length = 64; ++ else if (priv->nisttrng.status.sec_strength == SEC_STRNT_AES256) ++ addin_length = 96; ++ else ++ SYNHW_PRINT("Invalid security 
strength\n"); ++ ++ if (addin_exists) { ++ opts_str[1] = 0; ++ memcpy(opts_str, buf + 2, 1); ++ ++ if (opts_str[0] == '-') { ++ opts_str[8] = 0; ++ for (i = 0; i < addin_length / 8; i++) { ++ memcpy(opts_str, buf + 3 + i * 8, 8); ++ ret = kstrtouint(opts_str, 16, addin + (addin_length / 8 - 1) - i); ++ if (ret) ++ return ret; ++ } ++ } else { ++ SYNHW_PRINT("3rd character of input has to be \"-\" when additional input exists\n"); ++ } ++ ++ ret = nisttrng_reseed(&priv->nisttrng, pred_resist, ++ addin); ++ if (ret) ++ return -1; ++ ++ } else { ++ ret = nisttrng_reseed(&priv->nisttrng, pred_resist, ++ NULL); ++ if (ret) ++ return -1; ++ } ++ ++ return count; ++} ++ ++static ssize_t generate_store(struct device *dev, ++ struct device_attribute *devattr, const char *buf, ++ size_t count) ++{ ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ char opts_str[101]; ++ unsigned int opts_int; ++ int req_sec_strength = 128; ++ int pred_resist = 1; ++ bool addin_exists = 0; ++ unsigned char out[num_gen_bytes]; ++ u32 addin[12]; ++ unsigned int addin_length; ++ int i; ++ int ret; ++ ++ /* First 3 digits: ++ * they have to be 0 or 1 ++ * 2-1-0 --> 2: predictoin resistance, 1: security strength, 0: additional input string existence ++ */ ++ opts_str[3] = 0; ++ memcpy(opts_str, buf, 3); ++ ret = kstrtouint(opts_str, 2, &opts_int); ++ if (ret) ++ return ret; ++ ++ if (((opts_str[0] != '0') && (opts_str[0] != '1')) || ++ ((opts_str[1] != '0') && (opts_str[1] != '1')) || ++ ((opts_str[2] != '0') && (opts_str[2] != '1'))) { ++ SYNHW_PRINT("Invalid input options: First 3 digits can only be 1 or 0\n"); ++ return -1; ++ } ++ ++ if (opts_int & 1) ++ addin_exists = 1; ++ else ++ addin_exists = 0; ++ ++ if (opts_int & 2) ++ req_sec_strength = 256; ++ else ++ req_sec_strength = 128; ++ ++ if (opts_int & 4) ++ pred_resist = 1; ++ else ++ pred_resist = 0; ++ ++ /* check input option length */ ++ if (!addin_exists) { ++ if (count != 3) { ++ SYNHW_PRINT("Invalid input options: If additional input does not exist, options has to be 3 char.\n"); ++ return -1; ++ } ++ } else { ++ if (req_sec_strength == 128) { ++ if (count != 64 + 4) { // +4 for options and "-" ++ SYNHW_PRINT("Invalid input options: If additional input exists and security strength is 128-bit, options has to be 68 char.\n"); ++ return -1; ++ } ++ } else if (req_sec_strength == 256) { ++ if (count != 96 + 4) { // +4 for options and "-" ++ SYNHW_PRINT("Invalid input options: If additional input exists and security strength is 256-bit, options has to be 100 char.\n"); ++ return -1; ++ } ++ } else { ++ SYNHW_PRINT("Invalid input options\n"); ++ return -1; ++ } ++ } ++ ++ /* Additional input */ ++ for (i = 0; i < 12; i++) ++ addin[i] = 0; ++ ++ if (req_sec_strength == 128) ++ addin_length = 64; ++ else if (req_sec_strength == 256) ++ addin_length = 96; ++ else ++ SYNHW_PRINT("Invalid security strength\n"); ++ ++ if (addin_exists) { ++ opts_str[1] = 0; ++ memcpy(opts_str, buf + 3, 1); ++ ++ if (opts_str[0] == '-') { ++ opts_str[8] = 0; ++ for (i = 0; i < addin_length / 8; i++) { ++ memcpy(opts_str, buf + 4 + i * 8, 8); ++ ret = kstrtouint(opts_str, 16, addin + (addin_length / 8 - 1) - i); ++ if (ret) ++ return ret; ++ } ++ } else { ++ SYNHW_PRINT("4th character of input has to be \"-\" when additional input exists\n"); ++ } ++ ++ ret = nisttrng_generate(&priv->nisttrng, (u32 *)out, ++ num_gen_bytes, req_sec_strength, ++ pred_resist, addin); ++ if (ret) ++ return -1; ++ ++ } else { ++ ret = nisttrng_generate(&priv->nisttrng, (u32 *)out, ++ 
num_gen_bytes, req_sec_strength, ++ pred_resist, NULL); ++ if (ret) ++ return -1; ++ } ++ ++ /* store the result */ ++ memcpy(priv->rand_out, out, sizeof(out)); ++ ++ return count; ++} ++ ++static ssize_t generate_pub_vtrng_store(struct device *dev, ++ struct device_attribute *devattr, ++ const char *buf, size_t count) ++{ ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ char opts_str[2]; ++ unsigned int opts_int; ++ unsigned char out[num_gen_bytes]; ++ int ret; ++ ++ opts_str[1] = 0; ++ memcpy(opts_str, buf, 1); ++ ret = kstrtouint(opts_str, 16, &opts_int); ++ if (ret) ++ return ret; ++ ++ SYNHW_PRINT("%s %d %d %d %d\n", __func__, opts_str[0], ++ priv->nisttrng.config.edu_build_cfg0.public_vtrng_channels, ++ opts_str[1], opts_int); ++ ++ ret = nisttrng_generate_public_vtrng(&priv->nisttrng, ++ (u32 *)out, ++ num_gen_bytes, opts_int); ++ if (ret) ++ return -1; ++ ++ memcpy(priv->rand_out, out, sizeof(out)); ++ ++ return count; ++} ++ ++/* rand_out_show displays last generated random number (num_gen_bytes number of bytes), not just the last block. */ ++static ssize_t rand_out_show(struct device *dev, ++ struct device_attribute *devattr, char *buf) ++{ ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ unsigned int i, j; ++ unsigned long rand; ++ bool all_zero = true; ++ ++ /* If all bits of the rand_reg register are 0, display 0 */ ++ for (i = 0; i < 4; i++) { ++ rand = pdu_io_read32(priv->nisttrng.base + NIST_TRNG_REG_RAND0 + ++ (3 - i)); ++ if (rand != 0) { ++ all_zero = false; ++ break; ++ } ++ } ++ ++ if (all_zero) { ++ sprintf(buf + 2 * i, "%02x", 0); ++ } else { ++ for (i = 0; i < (num_gen_bytes / 16); i++) { ++ for (j = 0; j < 16; j++) { ++ sprintf(buf + 2 * (i * 16 + j), "%02x", ++ priv->rand_out[(i + 1) * 16 - 1 - j]); ++ } ++ } ++ j = 0; ++ while (i * 16 + j < num_gen_bytes) { ++ sprintf(buf + 2 * (i * 16 + j), "%02x", ++ priv->rand_out[num_gen_bytes - 1 - j]); ++ j++; ++ } ++ } ++ ++ strcat(buf, "\n"); ++ return strlen(buf); ++} ++ ++/* rand_out_vtrng_show displays last generated random number (num_gen_bytes number of bytes), not just the last block. 
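For orientation, a minimal user-space sketch of how the generate/rand_out pair above is intended to be exercised. The sysfs directory used here is an assumption (the real path depends on how the platform device is named); "100" encodes prediction resistance on, 128-bit strength, no additional input, per the option-bit comments in generate_store(); and the value is written without a trailing newline because the store handlers check count against an exact length.

#include <stdio.h>

/* Assumed sysfs location; adjust to the actual platform device path. */
#define SYSFS_DIR "/sys/devices/platform/nist_trng"

int main(void)
{
	char line[8192];
	FILE *f;

	/* "100": bit 2 prediction resistance = 1, bit 1 strength = 0 (128-bit), bit 0 addin = 0. */
	f = fopen(SYSFS_DIR "/generate", "w");
	if (!f)
		return 1;
	fputs("100", f);		/* exactly 3 characters, no newline */
	fclose(f);

	/* rand_out returns the whole num_gen_bytes result as lower-case hex. */
	f = fopen(SYSFS_DIR "/rand_out", "r");
	if (!f)
		return 1;
	if (fgets(line, sizeof(line), f))
		printf("random: %s", line);
	fclose(f);
	return 0;
}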
*/ ++static ssize_t rand_out_vtrng_show(struct device *dev, ++ struct device_attribute *devattr, char *buf) ++{ ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ unsigned int i, j; ++ ++ /* If all bits of the rand_reg register are 0, display 0 */ ++ ++ for (i = 0; i < (num_gen_bytes / 16); i++) { ++ for (j = 0; j < 16; j++) { ++ sprintf(buf + 2 * (i * 16 + j), "%02x", ++ priv->rand_out[(i + 1) * 16 - 1 - j]); ++ } ++ } ++ ++ j = 0; ++ while (i * 16 + j < num_gen_bytes) { ++ sprintf(buf + 2 * (i * 16 + j), "%02x", ++ priv->rand_out[num_gen_bytes - 1 - j]); ++ j++; ++ } ++ ++ strcat(buf, "\n"); ++ return strlen(buf); ++} ++ ++static ssize_t kat_store(struct device *dev, struct device_attribute *devattr, ++ const char *buf, size_t count) ++{ ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ int ret; ++ ++ if (sysfs_streq(buf, "full")) { ++ ret = nisttrng_full_kat(&priv->nisttrng); ++ if (ret) ++ return -1; ++ ++ } else if (sysfs_streq(buf, "00")) { ++ ret = nisttrng_kat(&priv->nisttrng, 0, 0); ++ if (ret) ++ return -1; ++ ++ } else if (sysfs_streq(buf, "01")) { ++ ret = nisttrng_kat(&priv->nisttrng, 0, 1); ++ if (ret) ++ return -1; ++ ++ } else if (sysfs_streq(buf, "10")) { ++ ret = nisttrng_kat(&priv->nisttrng, 1, 0); ++ if (ret) ++ return -1; ++ ++ } else if (sysfs_streq(buf, "11")) { ++ ret = nisttrng_kat(&priv->nisttrng, 1, 1); ++ if (ret) ++ return -1; ++ ++ } else { ++ ret = nisttrng_full_kat(&priv->nisttrng); ++ if (ret) ++ return -1; ++ } ++ ++ return count; ++} ++ ++static void str_to_384_bit(char *buf, u32 *out) ++{ ++ char foo[9]; ++ int i; ++ int ret; ++ ++ foo[8] = 0; ++ for (i = 0; i < 12; i++) { ++ memcpy(foo, buf + i * 8, 8); ++ ret = kstrtouint(foo, 16, out + 11 - i); ++ } ++} ++ ++/* This attribute is only for test purpuses */ ++static ssize_t test_attr_store(struct device *dev, ++ struct device_attribute *devattr, const char *buf, ++ size_t count) ++{ ++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev); ++ int i; ++ int err; ++ u32 addin[12]; ++ u32 ps[12]; ++ char *out; ++ ++ char buf_seed1[96] = ++ "c54805274bde00aa5289e0513579019707666d2fa7a1c8908865891c87c0c652335a4d3cc415bc30742b164647f8820f"; ++ char buf_ps1[96] = ++ "d63fb5afa2101fa4b8a6c3b89d9c250ac728fc1ddad0e7585b5d54728ed20c2f940e89155596e3b963635b6d6088164b"; ++ char buf_addin1[96] = ++ "744bfae3c23a5cc9a3b373b6c50795068d35eb8a339746ac810d16f864e880061082edf9d2687c211960aa83400f85f9"; ++ char buf_seed2[96] = ++ "b2ad31d1f20dcf30dd526ec9156c07f270216bdb59197325bab180675929888ab699c54fb21819b7d921d6346bff2f7f"; ++ char buf_addin2[96] = ++ "ad55c682962aa4fe9ebc227c9402e79b0aa7874844d33eaee7e2d15baf81d9d33936e4d93f28ad109657b512aee115a5"; ++ char buf_seed3[96] = ++ "eca449048d26fd38f8ca435237dce66eadec7069ee5dd0b70084b819a711c0820a7556bbd0ae20f06e5169278b593b71"; ++ u32 tmp[12]; ++ ++ for (i = 0; i < 12; i++) ++ addin[i] = i; ++ ++ for (i = 0; i < 12; i++) ++ ps[i] = i + 100; ++ ++ /* SDK doc example - Prediction Resistance not available, no Reseed */ ++ err = nisttrng_uninstantiate(&priv->nisttrng); ++ if (err && err != CRYPTO_NOT_INSTANTIATED) ++ return -1; ++ ++ if (nisttrng_instantiate(&priv->nisttrng, 128, 0, ps) < 0) ++ return -1; ++ ++ out = kmalloc(10, GFP_KERNEL); ++ if (nisttrng_generate(&priv->nisttrng, out, 10, 128, 0, addin) < 0) ++ return -1; ++ ++ DEBUG("----- Generate 10 bytes\n"); ++ for (i = 0; i < 10; i++) ++ DEBUG("%02x", out[i]); ++ ++ DEBUG("\n"); ++ kfree(out); ++ ++ out = kmalloc(512, GFP_KERNEL); ++ if (nisttrng_generate(&priv->nisttrng, out, 
512, 128, 0, addin) < 0) ++ return -1; ++ ++ DEBUG("----- Generate 512 bytes\n"); ++ for (i = 0; i < 512; i++) ++ DEBUG("%02x", out[i]); ++ ++ DEBUG("\n"); ++ kfree(out); ++ ++ out = kmalloc(41, GFP_KERNEL); ++ if (nisttrng_generate(&priv->nisttrng, out, 41, 128, 0, addin) < 0) ++ return -1; ++ ++ DEBUG("----- Generate 41 bytes\n"); ++ for (i = 0; i < 41; i++) ++ DEBUG("%02x", out[i]); ++ ++ DEBUG("\n"); ++ kfree(out); ++ ++ err = nisttrng_uninstantiate(&priv->nisttrng); ++ if (err < 0 && err != CRYPTO_NOT_INSTANTIATED) ++ return -1; ++ ++ /* SDK doc example - DRBG Validation */ ++ err = nisttrng_uninstantiate(&priv->nisttrng); ++ if (err && err != CRYPTO_NOT_INSTANTIATED) ++ return -1; ++ ++ if (nisttrng_set_nonce_mode(&priv->nisttrng, 1) < 0) ++ return -1; ++ ++ out = kmalloc(64, GFP_KERNEL); ++ str_to_384_bit(buf_seed1, tmp); ++ if (nisttrng_get_entropy_input(&priv->nisttrng, tmp, 0) < 0) ++ return -1; ++ ++ str_to_384_bit(buf_ps1, tmp); ++ if (nisttrng_instantiate(&priv->nisttrng, 256, 1, tmp) < 0) ++ return -1; ++ ++ str_to_384_bit(buf_seed2, tmp); ++ if (nisttrng_get_entropy_input(&priv->nisttrng, tmp, 0) < 0) ++ return -1; ++ ++ str_to_384_bit(buf_addin1, tmp); ++ if (nisttrng_generate(&priv->nisttrng, out, 64, 256, 1, tmp) < 0) ++ return -1; ++ ++ str_to_384_bit(buf_seed3, tmp); ++ if (nisttrng_get_entropy_input(&priv->nisttrng, tmp, 0) < 0) ++ return -1; ++ ++ str_to_384_bit(buf_addin2, tmp); ++ if (nisttrng_generate(&priv->nisttrng, out, 64, 256, 1, tmp) < 0) ++ return -1; ++ ++ memcpy(priv->rand_out, out, 64); ++ ++ return count; ++} ++ ++static DEVICE_ATTR_RO(ckr); ++static DEVICE_ATTR_RO(features); ++static DEVICE_ATTR_RW(secure); ++static DEVICE_ATTR_RW(nonce); ++static DEVICE_ATTR_RW(sec_strength); ++ ++static DEVICE_ATTR_RW(mode_reg); ++static DEVICE_ATTR_RW(smode_reg); ++static DEVICE_ATTR_RW(alarm_reg); ++static DEVICE_ATTR_RO(rand_reg); ++static DEVICE_ATTR_RO(rand_out); ++static DEVICE_ATTR_RO(rand_out_vtrng); ++static DEVICE_ATTR_RW(seed_reg); ++static DEVICE_ATTR_RW(npa_data_reg); ++static DEVICE_ATTR_RW(ctrl_reg); ++static DEVICE_ATTR_RW(istat_reg); ++static DEVICE_ATTR_RO(stat_reg); ++static DEVICE_ATTR_RW(rnc_reg); ++static DEVICE_ATTR_RW(rbc_reg); ++ ++static DEVICE_ATTR_RW(ia_wdata_reg); ++static DEVICE_ATTR_RO(ia_rdata_reg); ++static DEVICE_ATTR_RW(ia_addr_reg); ++static DEVICE_ATTR_RW(ia_cmd_reg); ++static DEVICE_ATTR_RO(hw_state); ++ ++static DEVICE_ATTR_RO(collect_ent); ++static DEVICE_ATTR_RO(collect_ent_nsout); ++static DEVICE_ATTR_WO(nonce_seed_with_df); ++static DEVICE_ATTR_WO(nonce_seed_direct); ++static DEVICE_ATTR_WO(instantiate); ++static DEVICE_ATTR_WO(uninstantiate); ++static DEVICE_ATTR_WO(reseed); ++static DEVICE_ATTR_WO(generate); ++static DEVICE_ATTR_WO(generate_pub_vtrng); ++static DEVICE_ATTR_WO(kat); ++ ++static DEVICE_ATTR_RW(max_bits_per_req); ++static DEVICE_ATTR_RW(max_req_per_seed); ++ ++static DEVICE_ATTR_WO(test_attr); ++ ++static const struct attribute_group nisttrng_attr_group = { ++ .attrs = ++ (struct attribute *[]){ ++ &dev_attr_ckr.attr, ++ //&dev_attr_stepping.attr, ++ &dev_attr_features.attr, &dev_attr_secure.attr, ++ &dev_attr_nonce.attr, &dev_attr_sec_strength.attr, ++ ++ &dev_attr_mode_reg.attr, &dev_attr_smode_reg.attr, ++ &dev_attr_alarm_reg.attr, &dev_attr_rand_reg.attr, ++ &dev_attr_rand_out.attr, &dev_attr_rand_out_vtrng.attr, ++ &dev_attr_seed_reg.attr, &dev_attr_npa_data_reg.attr, ++ &dev_attr_ctrl_reg.attr, &dev_attr_istat_reg.attr, ++ &dev_attr_stat_reg.attr, &dev_attr_rnc_reg.attr, ++ &dev_attr_rbc_reg.attr, ++ ++ 
&dev_attr_ia_wdata_reg.attr, ++ &dev_attr_ia_rdata_reg.attr, &dev_attr_ia_addr_reg.attr, ++ &dev_attr_ia_cmd_reg.attr, &dev_attr_hw_state.attr, ++ ++ &dev_attr_collect_ent.attr, ++ &dev_attr_collect_ent_nsout.attr, ++ &dev_attr_nonce_seed_with_df.attr, ++ &dev_attr_nonce_seed_direct.attr, ++ &dev_attr_instantiate.attr, ++ &dev_attr_uninstantiate.attr, &dev_attr_reseed.attr, ++ &dev_attr_generate.attr, ++ &dev_attr_generate_pub_vtrng.attr, &dev_attr_kat.attr, ++ ++ &dev_attr_max_bits_per_req.attr, ++ &dev_attr_max_req_per_seed.attr, ++ ++ &dev_attr_test_attr.attr, NULL }, ++}; ++ ++static int nisttrng_self_test(struct nist_trng_state *nist_trng) ++{ ++ u32 seed[16], out[4], x, y; ++ ++ static const u32 exp128[10][4] = { ++ { 0x5db79bb2, 0xc3a0df1e, 0x099482b6, ++ 0xc319981e }, // The 1st generated output ++ { 0xb344d301, 0xdbd97ca0, 0x6e66e668, ++ 0x0bcd4625 }, // The 2nd generate output ++ { 0xec553f18, 0xa0e5c3cb, 0x752c03c2, ++ 0x5e7b04f7 }, // The 3rd generate output ++ { 0xcfe23e6e, 0x5302edc2, 0xdbf7b05b, ++ 0x2c817c0f }, // The 4th generate output ++ { 0xbd5a8726, 0x028c43d0, 0xb77ac4e3, ++ 0x0844ba2c }, // The 5th generate output ++ { 0xa63b4c0e, 0x8d11d0ba, 0x08b5a10f, ++ 0xab731aff }, // The 6th generate output ++ { 0xb7b56a2f, 0x1d84d1f0, 0xe48d1a0a, ++ 0x43a010a6 }, // The 7th generate output ++ { 0xcf66439d, 0xc937451d, 0x75c34d20, ++ 0x21a21398 }, // The 8th generate output ++ { 0xcb6f0a57, 0x5ff34705, 0x08838e49, ++ 0x21137614 }, // The 9th generate output ++ { 0x61c48b24, 0x25c18d29, 0xc6005e4e, ++ 0xae3b0389 }, // The 10th generate output ++ }; ++ ++ static const u32 exp256[10][4] = { ++ { 0x1f1a1441, 0xa0865ece, 0x9ff8d5b9, ++ 0x3f78ace6 }, // The 1st generated output ++ { 0xf8190a86, 0x6d6ded2a, 0xc4d0e9bf, ++ 0x24dab55c }, // The 2nd generate output ++ { 0xd3948b74, 0x3dfea516, 0x9c3b86a2, ++ 0xeb184b41 }, // The 3rd generate output ++ { 0x2eb82ab6, 0x2aceefda, 0xc0cf6a5f, ++ 0xa45cb333 }, // The 4th generate output ++ { 0xa49b1c7b, 0x5b51bac7, 0x7586770b, ++ 0x8cb2c392 }, // The 5th generate output ++ { 0x3f3ba09d, 0xa2c9ad29, 0x9687fb8f, ++ 0xa5ae3fd5 }, // The 6th generate output ++ { 0x11dd1076, 0xe37e86cb, 0xced0220a, ++ 0x00448c4f }, // The 7th generate output ++ { 0x955a5e52, 0x84ee38b1, 0xb3271e5f, ++ 0x097751e3 }, // The 8th generate output ++ { 0x5cd73ba8, 0xd8a36a1e, 0xa8a2d7c3, ++ 0xa96de048 }, // The 9th generate output ++ { 0xfb374c63, 0x827b85fa, 0x244e0c7a, ++ 0xa09afd39 }, // The 10th generate output ++ }; ++ ++ int ret, enable, rate, urun; ++ u32 tmp; ++ ++ for (x = 0; x < 16; x++) ++ seed[x] = 0x12345679 * (x + 1); ++ ++ DEBUG("Doing a self-test with security strength of 128\n"); ++ ret = nisttrng_uninstantiate(nist_trng); ++ if (ret && ret != CRYPTO_NOT_INSTANTIATED) ++ goto ERR; ++ ++ //if ((ret = nisttrng_set_secure_mode(nist_trng, 0))) { goto ERR; } ++ ret = nisttrng_set_nonce_mode(nist_trng, 1); ++ if (ret) ++ goto ERR; ++ ++ ret = nisttrng_set_sec_strength(nist_trng, 128); ++ if (ret) ++ goto ERR; ++ ++ ret = nisttrng_get_entropy_input(nist_trng, seed, 0); ++ if (ret) ++ goto ERR; ++ ++ ret = nisttrng_instantiate(nist_trng, 128, 0, NULL); ++ if (ret) ++ goto ERR; ++ ++ if (nist_trng->config.build_cfg0.edu_present) { ++ ret = nisttrng_wait_fifo_full(nist_trng); ++ if (ret) ++ goto ERR; ++ } ++ ++ ret = nisttrng_generate(nist_trng, out, 16, 128, 0, NULL); ++ if (ret) ++ goto ERR; ++ ++ if (nist_trng->config.features.extra_ps_present) { ++ DEBUG("skip KAT with extra_ps_present\n"); ++ } else { ++ DEBUG("nist_trng: AES-128 Self-test output: "); ++ 
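A note on the indexing in the known-answer comparison that follows: when the EDU and its ESM channel are present, the first generated block is consumed by the ESM, so the host compares its first block against the second table entry. A stand-alone sketch of that selection, trimmed to the first two AES-128 vectors from the table above; first_expected_row() is illustrative, not a driver function.

#include <stdint.h>
#include <stdio.h>

/* Row of the known-answer table the host should see first. */
static int first_expected_row(int edu_present, int esm_channel)
{
	return (edu_present && esm_channel) ? 1 : 0;	/* ESM consumes block 0 */
}

int main(void)
{
	static const uint32_t exp128[2][4] = {
		{ 0x5db79bb2, 0xc3a0df1e, 0x099482b6, 0xc319981e },
		{ 0xb344d301, 0xdbd97ca0, 0x6e66e668, 0x0bcd4625 },
	};
	int row = first_expected_row(1, 1);

	printf("expect row %d, first word 0x%08lx\n", row,
	       (unsigned long)exp128[row][0]);
	return 0;
}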
for (x = 0; x < 4; x++) ++ DEBUG("0x%08lx ", (unsigned long)out[x]); ++ ++ if (nist_trng->config.build_cfg0.edu_present) { ++ if (nist_trng->config.edu_build_cfg0 ++ .esm_channel) { //if esm_channel is available the first random number goes to esm ++ for (x = 0; x < 4; x++) { ++ if (out[x] != exp128[1][x]) ++ ret = 1; ++ } ++ } ++ } else { ++ for (x = 0; x < 4; x++) { ++ if (out[x] != exp128[0][x]) ++ ret = 1; ++ } ++ } ++ ++ if (ret) { ++ SYNHW_PRINT("... FAILED comparison\n"); ++ ret = -1; ++ goto ERR; ++ } else { ++ DEBUG("... PASSED\n"); ++ } ++ } ++ ++ // if edu is available check all the pvtrng's ++ if (nist_trng->config.build_cfg0.edu_present) { ++ for (x = 0; ++ x < nist_trng->config.edu_build_cfg0.public_vtrng_channels; ++ x++) { ++ DEBUG("vtrng %d\n", x); ++ ret = nisttrng_generate_public_vtrng(nist_trng, out, 16, x); ++ if (ret) ++ goto ERR; ++ ++ for (y = 0; y < 4; y++) { ++ DEBUG("0x%08lx ", (unsigned long)out[y]); ++ if (out[y] != exp128[x + 2][y]) ++ ret = 1; ++ } ++ if (ret) { ++ SYNHW_PRINT("... FAILED comparison\n"); ++ ret = -1; ++ goto ERR; ++ } else { ++ DEBUG("... PASSED\n"); ++ } ++ } ++ } ++ // if edu is available empty the fifo before creating the new instance with strength of 256 ++ if (nist_trng->config.build_cfg0.edu_present) { ++ nisttrng_rnc(nist_trng, ++ NIST_TRNG_EDU_RNC_CTRL_CMD_RNC_FINISH_TO_IDLE); ++ tmp = NIST_TRNG_REG_ISTAT_DONE; ++ //always clear the busy bit after disabling RNC ++ pdu_io_write32(nist_trng->base + NIST_TRNG_REG_ISTAT, tmp); ++ tmp = pdu_io_read32(nist_trng->base + NIST_TRNG_REG_ISTAT); ++ do { ++ ret = nisttrng_generate_public_vtrng(nist_trng, out, 16, 0); ++ if (ret) ++ goto ERR; ++ ++ tmp = pdu_io_read32(nist_trng->base + ++ NIST_TRNG_EDU_STAT); ++ ++ } while (!NIST_TRNG_EDU_STAT_FIFO_EMPTY(tmp)); ++ } ++ ++ if (nist_trng->config.features.drbg_arch == AES256) { ++ // test AES-256 mode ++ DEBUG("Doing a self-test with security strength of 256\n"); ++ ret = nisttrng_uninstantiate(nist_trng); ++ if (ret && ret != CRYPTO_NOT_INSTANTIATED) ++ goto ERR; ++ ++ ret = nisttrng_set_nonce_mode(nist_trng, 1); ++ if (ret) ++ goto ERR; ++ ++ ret = nisttrng_set_sec_strength(nist_trng, 256); ++ if (ret) ++ goto ERR; ++ ++ ret = nisttrng_get_entropy_input(nist_trng, seed, 0); ++ if (ret) ++ goto ERR; ++ ++ ret = nisttrng_instantiate(nist_trng, 256, 0, NULL); ++ if (ret) ++ goto ERR; ++ ++ ret = nisttrng_generate(nist_trng, out, 16, 256, 0, NULL); ++ if (ret) ++ goto ERR; ++ ++ if (nist_trng->config.features.extra_ps_present) { ++ DEBUG("skip KAT with extra_ps_present\n"); ++ } else { ++ DEBUG("nist_trng: AES-256 Self-test output: "); ++ for (x = 0; x < 4; x++) ++ DEBUG("0x%08lx ", (unsigned long)out[x]); ++ ++ for (x = 0; x < 4; x++) { ++ if (out[x] != exp256[0][x]) ++ ret = 1; ++ } ++ if (ret) { ++ SYNHW_PRINT("... FAILED comparison\n"); ++ ret = -1; ++ goto ERR; ++ } else { ++ DEBUG("... PASSED\n"); ++ } ++ } ++ } ++ ++ // if edu is available check all the pvtrng's ++ if (nist_trng->config.build_cfg0.edu_present) { ++ for (x = 0; ++ x < nist_trng->config.edu_build_cfg0.public_vtrng_channels; ++ x++) { ++ DEBUG("vtrng 256 %d\n", x); ++ ret = nisttrng_generate_public_vtrng(nist_trng, out, 16, x); ++ if (ret) ++ goto ERR; ++ ++ for (y = 0; y < 4; y++) { ++ DEBUG("0x%08lx ", (unsigned long)out[y]); ++ if (out[y] != exp256[x + 1][y]) ++ ret = 1; ++ } ++ if (ret) { ++ SYNHW_PRINT("... FAILED comparison\n"); ++ ret = -1; ++ goto ERR; ++ } else { ++ DEBUG("... 
PASSED\n"); ++ } ++ } ++ ++ //Test RBC channels ++ // enable RBC channels with rate of 2 and urun 1 ++ enable = 1; ++ rate = 2; ++ urun = 1; ++ for (x = 0; x < nist_trng->config.edu_build_cfg0.rbc_channels; ++ x++) { ++ ret = nisttrng_rbc(nist_trng, enable, x, rate, urun); ++ if (ret) ++ goto ERR; ++ ++ tmp = pdu_io_read32(nist_trng->base + ++ NIST_TRNG_EDU_RBC_CTRL); ++ ++ switch (x) { ++ case 0: ++ if (rate != NISTTRNG_EDU_RBC_CTRL_GET_CH_RATE(tmp, _NIST_TRNG_EDU_RBC_CTRL_CH0_RATE) || ++ urun != NISTTRNG_EDU_RBC_CTRL_GET_CH_URUN_BLANK(tmp, _NIST_TRNG_EDU_RBC_CTRL_CH0_URUN_BLANK)) { ++ goto ERR; ++ } ++ break; ++ case 1: ++ if (rate != NISTTRNG_EDU_RBC_CTRL_GET_CH_RATE(tmp, _NIST_TRNG_EDU_RBC_CTRL_CH1_RATE) || ++ urun != NISTTRNG_EDU_RBC_CTRL_GET_CH_URUN_BLANK(tmp, _NIST_TRNG_EDU_RBC_CTRL_CH1_URUN_BLANK)) { ++ goto ERR; ++ } ++ break; ++ case 2: ++ if (rate != NISTTRNG_EDU_RBC_CTRL_GET_CH_RATE(tmp, _NIST_TRNG_EDU_RBC_CTRL_CH2_RATE) || ++ urun != NISTTRNG_EDU_RBC_CTRL_GET_CH_URUN_BLANK(tmp, _NIST_TRNG_EDU_RBC_CTRL_CH2_URUN_BLANK)) { ++ goto ERR; ++ } ++ break; ++ default: ++ DEBUG("Incorrect rbc_num = %d\n", x); ++ goto ERR; ++ } ++ } ++ DEBUG("RBC test passed\n"); ++ } ++ ++ //IF RNCis not disable, disable it ++ if (pdu_io_read32(nist_trng->base + NIST_TRNG_EDU_RNC_CTRL) != ++ NIST_TRNG_EDU_RNC_CTRL_CMD_RNC_FINISH_TO_IDLE) { ++ nisttrng_rnc(nist_trng, ++ NIST_TRNG_EDU_RNC_CTRL_CMD_RNC_FINISH_TO_IDLE); ++ tmp = NIST_TRNG_REG_ISTAT_DONE; ++ //always clear the busy bit after disabling RNC ++ pdu_io_write32(nist_trng->base + NIST_TRNG_REG_ISTAT, tmp); ++ } ++ ++ /* back to the noise mode */ ++ ret = nisttrng_set_nonce_mode(nist_trng, 0); ++ if (ret) ++ goto ERR; ++ ++ ret = nisttrng_zeroize(nist_trng); ++ if (ret) ++ goto ERR; ++ERR: ++ return ret; ++} ++ ++static int nisttrng_driver_probe(struct platform_device *pdev) ++{ ++ struct synopsys_nisttrng_driver *data; ++ struct hwrng *hwrng_driver_info = 0; ++ struct resource *cfg, *irq; ++ u32 *base_addr; ++ int ret; ++ ++ // version ++ SYNHW_PRINT("DWC_TRNG_DriverSDK_%s\n", TRNG_VERSION); ++ ++ cfg = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); ++ ++ if (!cfg || !irq) { ++ SYNHW_PRINT("no memory or IRQ resource\n"); ++ return -ENOMEM; ++ } ++ ++ DEBUG("=================================================================\n"); ++ DEBUG("nisttrng_probe: Device at %08lx(%08lx) of size %lu bytes\n", ++ (unsigned long)cfg->start, (unsigned long)cfg->end, ++ (unsigned long)resource_size(cfg)); ++ ++ data = devm_kzalloc(&pdev->dev, sizeof(struct synopsys_nisttrng_driver), ++ GFP_KERNEL); ++ if (!data) ++ return -ENOMEM; ++ ++ platform_set_drvdata(pdev, data); ++ ++ base_addr = pdu_linux_map_regs(&pdev->dev, cfg); ++ if (IS_ERR(base_addr)) { ++ dev_err(&pdev->dev, "unable to remap io mem\n"); ++ return PTR_ERR(base_addr); ++ } ++ ++ ret = nisttrng_init(&data->nisttrng, (u32 *)base_addr); ++ if (ret) { ++ SYNHW_PRINT("NIST_TRNG init failed (%d)\n", ret); ++ devm_kfree(&pdev->dev, data); ++ return ret; ++ } ++ ++ /* if max_reads is not 0, change the max_req_per_seed according to max_reads */ ++ if (max_reads) { ++ ret = nisttrng_set_reminder_max_req_per_seed(&data->nisttrng, max_reads); ++ if (ret) { ++ SYNHW_PRINT("NIST_TRNG maximum request-per-seed setup failed (%d)\n", ++ ret); ++ devm_kfree(&pdev->dev, data); ++ return ret; ++ } ++ } ++ ++ // issue quick self test ++ ret = nisttrng_self_test(&data->nisttrng); ++ if (ret) { ++ devm_kfree(&pdev->dev, data); ++ return -ENOMEM; ++ } ++ ++ // ready 
the device for use ++ ret = nisttrng_instantiate(&data->nisttrng, ++ data->nisttrng.config.features.drbg_arch ? 256 : 128, 1, NULL); ++ if (ret) { ++ SYNHW_PRINT("NIST_TRNG instantiate failed (%d)\n", ret); ++ devm_kfree(&pdev->dev, data); ++ return -ENOMEM; ++ } ++ ++ // at this point the device should be ready for a call to gen_random ++ hwrng_driver_info = ++ devm_kzalloc(&pdev->dev, sizeof(struct hwrng), GFP_KERNEL); ++ if (!hwrng_driver_info) { ++ devm_kfree(&pdev->dev, data); ++ return -ENOMEM; ++ } ++ ++ hwrng_driver_info->name = devm_kzalloc(&pdev->dev, ++ sizeof(SYNOPSYS_HWRNG_DRIVER_NAME) + 1, GFP_KERNEL); ++ if (!hwrng_driver_info->name) { ++ devm_kfree(&pdev->dev, data); ++ devm_kfree(&pdev->dev, hwrng_driver_info); ++ return -ENOMEM; ++ } ++ ++ memset((void *)hwrng_driver_info->name, 0, ++ sizeof(SYNOPSYS_HWRNG_DRIVER_NAME) + 1); ++ strscpy((char *)hwrng_driver_info->name, SYNOPSYS_HWRNG_DRIVER_NAME, ++ sizeof(SYNOPSYS_HWRNG_DRIVER_NAME)); ++ ++ hwrng_driver_info->read = &nisttrng_hwrng_driver_read; ++ hwrng_driver_info->data_present = 0; ++ hwrng_driver_info->priv = (unsigned long)pdev; ++ hwrng_driver_info->quality = 1024; ++ ++ data->hwrng_drv = hwrng_driver_info; ++ ret = hwrng_register(hwrng_driver_info); ++ ++ if (ret) { ++ SYNHW_PRINT("unable to load HWRNG driver (error %d)\n", ret); ++ devm_kfree(&pdev->dev, (void *)hwrng_driver_info->name); ++ devm_kfree(&pdev->dev, hwrng_driver_info); ++ devm_kfree(&pdev->dev, data); ++ return ret; ++ } ++ ++ ret = sysfs_create_group(&pdev->dev.kobj, &nisttrng_attr_group); ++ if (ret < 0) { ++ SYNHW_PRINT("unable to initialize sysfs group (error %d)\n", ++ ret); ++ hwrng_unregister(hwrng_driver_info); ++ devm_kfree(&pdev->dev, (void *)hwrng_driver_info->name); ++ devm_kfree(&pdev->dev, hwrng_driver_info); ++ devm_kfree(&pdev->dev, data); ++ return ret; ++ } ++ SYNHW_PRINT("SYN NIST_TRNG registering HW_RANDOM\n"); ++ return 0; ++} ++ ++static void nisttrng_driver_remove(struct platform_device *pdev) ++{ ++ struct synopsys_nisttrng_driver *data = platform_get_drvdata(pdev); ++ struct hwrng *hwrng_driver_info = (struct hwrng *)data->hwrng_drv; ++ ++ SYNHW_PRINT("SYN NIST_TRNG unregistering from HW_RANDOM\n"); ++ hwrng_unregister(hwrng_driver_info); ++ sysfs_remove_group(&pdev->dev.kobj, &nisttrng_attr_group); ++ devm_kfree(&pdev->dev, (void *)hwrng_driver_info->name); ++ devm_kfree(&pdev->dev, hwrng_driver_info); ++ devm_kfree(&pdev->dev, data); ++} ++ ++static struct platform_driver s_nisttrng_platform_driver_info = { ++ .probe = nisttrng_driver_probe, ++ .remove = nisttrng_driver_remove, ++ .driver = { ++ .name = "nist_trng", ++ .owner = THIS_MODULE, ++ }, ++}; ++ ++static int __init nisttrng_platform_driver_start(void) ++{ ++ return platform_driver_register(&s_nisttrng_platform_driver_info); ++} ++ ++static void __exit nisttrng_platform_driver_end(void) ++{ ++ platform_driver_unregister(&s_nisttrng_platform_driver_info); ++} ++ ++module_init(nisttrng_platform_driver_start); ++module_exit(nisttrng_platform_driver_end); ++ ++module_param(max_reads, ulong, 0); ++MODULE_PARM_DESC(max_reads, "Max # of reads between reseeds (default is 128)"); ++MODULE_LICENSE("GPL"); ++MODULE_AUTHOR("Synopsys, Inc."); +diff --git a/drivers/char/hw_random/dwc/src/trng/trng/nist_trng.c b/drivers/char/hw_random/dwc/src/trng/trng/nist_trng.c +--- a/drivers/char/hw_random/dwc/src/trng/trng/nist_trng.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/char/hw_random/dwc/src/trng/trng/nist_trng.c 2025-12-23 10:16:19.525059469 +0000 +@@ -0,0 +1,956 @@ ++// 
SPDX-License-Identifier: GPL-2.0 ++/* ++ * This Synopsys software and associated documentation (hereinafter the ++ * "Software") is an unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. The ++ * Software IS NOT an item of Licensed Software or a Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Products ++ * with Synopsys or any supplement thereto. Synopsys is a registered trademark ++ * of Synopsys, Inc. Other names included in the SOFTWARE may be the ++ * trademarks of their respective owners. ++ * ++ * The contents of this file are dual-licensed; you may select either version ++ * 2 of the GNU General Public License ("GPL") or the BSD-3-Clause license ++ * ("BSD-3-Clause"). The GPL is included in the COPYING file accompanying the ++ * SOFTWARE. The BSD License is copied below. ++ * ++ * BSD-3-Clause License: ++ * Copyright (c) 2012-2016 Synopsys, Inc. and/or its affiliates. ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * ++ * 1. Redistributions of source code must retain the above copyright notice, ++ * this list of conditions, and the following disclaimer, without ++ * modification. ++ * ++ * 2. Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * ++ * 3. The names of the above-listed copyright holders may not be used to ++ * endorse or promote products derived from this software without specific ++ * prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. 
++ */ ++ ++#include "nisttrng_hw.h" ++#include "nisttrng.h" ++ ++/* Initialize the NIST_TRNG state structure */ ++int nisttrng_init(struct nist_trng_state *state, u32 *base) ++{ ++ int err; ++ u32 tmp; ++ ++ DEBUG(">> %s: initialize the NIST_TRNG\n", __func__); ++ ++ memset(state, 0, sizeof(*state)); ++ ++ state->base = base; ++ ++ /* make sure there is no alarm and the core is not busy */ ++ err = nisttrng_get_alarms(state); ++ if (err) ++ goto ERR; ++ ++ err = nisttrng_wait_on_busy(state); ++ if (err) ++ goto ERR; ++ ++ /* hardware features*/ ++ tmp = pdu_io_read32(state->base + NIST_TRNG_REG_FEATURES); ++ ++ state->config.features.drbg_arch = NIST_TRNG_REG_FEATURES_AES_256(tmp); ++ state->config.features.extra_ps_present = ++ NIST_TRNG_REG_FEATURES_EXTRA_PS_PRESENT(tmp); ++ state->config.features.secure_rst_state = ++ NIST_TRNG_REG_FEATURES_SECURE_RST_STATE(tmp); ++ state->config.features.diag_level_basic_trng = ++ NIST_TRNG_REG_FEATURES_DIAG_LEVEL_BASIC_TRNG(tmp); ++ state->config.features.diag_level_stat_hlt = ++ NIST_TRNG_REG_FEATURES_DIAG_LEVEL_ST_HLT(tmp); ++ state->config.features.diag_level_ns = ++ NIST_TRNG_REG_FEATURES_DIAG_LEVEL_NS(tmp); ++ ++ /* corekit */ ++ tmp = pdu_io_read32(state->base + NIST_TRNG_REG_COREKIT_REL); ++ state->config.corekit_rel.ext_enum = NIST_TRNG_REG_EXT_ENUM(tmp); ++ state->config.corekit_rel.ext_ver = NIST_TRNG_REG_EXT_VER(tmp); ++ state->config.corekit_rel.rel_num = NIST_TRNG_REG_REL_NUM(tmp); ++ ++ /* clear registers */ ++ pdu_io_write32(state->base + NIST_TRNG_REG_ALARM, 0xFFFFFFFF); ++ pdu_io_write32(state->base + NIST_TRNG_REG_ISTAT, 0xFFFFFFFF); ++ ++ /* setup the NIST_TRNG in secure mode, self seeding mode, with prediction resistance, maximum possible security strength */ ++ /* SMODE */ ++ tmp = 0; ++ tmp = NIST_TRNG_REG_SMODE_SET_SECURE_EN(tmp, 1); ++ tmp = NIST_TRNG_REG_SMODE_SET_NONCE(tmp, 0); ++ tmp = NIST_TRNG_REG_SMODE_SET_MAX_REJECTS(tmp, ++ NIST_TRNG_DFLT_MAX_REJECTS); ++ pdu_io_write32(state->base + NIST_TRNG_REG_SMODE, tmp); ++ state->status.secure_mode = 1; ++ state->status.nonce_mode = 0; ++ /* MODE */ ++ tmp = 0; ++ if (state->config.features.drbg_arch == AES256) { ++ tmp = NIST_TRNG_REG_MODE_SET_SEC_ALG(tmp, 1); ++ state->status.sec_strength = SEC_STRNT_AES256; ++ ++ } else if (state->config.features.drbg_arch == AES128) { ++ tmp = NIST_TRNG_REG_MODE_SET_SEC_ALG(tmp, 0); ++ state->status.sec_strength = SEC_STRNT_AES128; ++ ++ } else { ++ SYNHW_PRINT("Invalid DRBG architecture"); ++ err = CRYPTO_FAILED; ++ goto ERR; ++ } ++ ++ tmp = NIST_TRNG_REG_MODE_SET_PRED_RESIST(tmp, 1); ++ pdu_io_write32(state->base + NIST_TRNG_REG_MODE, 0); ++ state->status.pred_resist = 1; ++ /* rest of the status */ ++ state->status.alarm_code = 0; ++ state->status.pad_ps_addin = 0; ++ ++ /* reminders - set the counters to the standard's maximum values. 
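The reminder counters referred to in this comment implement the SP 800-90A seed-lifetime limits: for CTR_DRBG the standard caps a single request at 2^19 bits and a seed at 2^48 requests, and the driver lets both ceilings be lowered (see the max_reads module parameter and the max_bits_per_req/max_req_per_seed attributes). Below is a stand-alone sketch of the bookkeeping with the standard's ceilings as defaults; struct seed_counters and consume_bits() are illustrative, not the driver's types.

#include <stdint.h>
#include <stdio.h>

struct seed_counters {
	int64_t  bits_per_req_left;	/* budget checked against max_bits_per_req */
	uint64_t req_per_seed_left;	/* budget checked against max_req_per_seed */
};

/* Returns -1 once a request has drawn more bits than the per-request cap allows. */
static int consume_bits(struct seed_counters *c, unsigned int bits)
{
	c->bits_per_req_left -= bits;
	return c->bits_per_req_left < 0 ? -1 : 0;
}

int main(void)
{
	struct seed_counters c = {
		.bits_per_req_left = 1 << 19,		/* SP 800-90A: 2^19 bits per request */
		.req_per_seed_left = (uint64_t)1 << 48,	/* SP 800-90A: 2^48 requests per seed */
	};

	printf("ok=%d, bits left=%lld, requests left=%llu\n",
	       consume_bits(&c, 512),
	       (long long)c.bits_per_req_left,
	       (unsigned long long)c.req_per_seed_left);
	return 0;
}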
An API is be provided to change those on demand.*/ ++ nisttrng_set_reminder_max_bits_per_req(state, ++ NIST_DFLT_MAX_BITS_PER_REQ); ++ nisttrng_set_reminder_max_req_per_seed(state, ++ NIST_DFLT_MAX_REQ_PER_SEED); ++ ++ /* display features */ ++ SYNHW_PRINT("NIST_TRNG: Hardware rel_num=0x%x, ext_ver=0x%x, ext_enum=0x%x\n", ++ state->config.corekit_rel.rel_num, ++ state->config.corekit_rel.ext_ver, ++ state->config.corekit_rel.ext_enum); ++ switch (state->config.features.drbg_arch) { ++ case AES128: ++ DEBUG("NIST_TRNG: DRBG Architecture=128-bit AES, Extra Personalization Existence=%u\n", ++ state->config.features.extra_ps_present); ++ break; ++ case AES256: ++ DEBUG("NIST_TRNG: DRBG Architecture=256-bit AES, Extra Personalization Existence=%u\n", ++ state->config.features.extra_ps_present); ++ break; ++ default: ++ SYNHW_PRINT("Invalid DRBG architecture"); ++ err = CRYPTO_FAILED; ++ goto ERR; ++ } ++ ++ DEBUG("initialization is done, going for a zeroize\n"); ++ ++ // BUILD_CFG0 ++ tmp = pdu_io_read32(state->base + NIST_TRNG_REG_BUILD_CFG0); ++ state->config.build_cfg0.core_type = NIST_TRNG_REG_CFG0_CORE_TYPE(tmp); ++ state->config.build_cfg0.bg8 = NIST_TRNG_REG_CFG0_BG8(tmp); ++ state->config.build_cfg0.cdc_synch_depth = ++ NIST_TRNG_REG_CFG0_CDC_SYNCH_DEPTH(tmp); ++ state->config.build_cfg0.background_noise = ++ NIST_TRNG_REG_CFG0_BACGROUND_NOISE(tmp); ++ state->config.build_cfg0.edu_present = ++ NIST_TRNG_REG_CFG0_EDU_PRESENT(tmp); ++ state->config.build_cfg0.aes_datapath = ++ NIST_TRNG_REG_CFG0_AES_DATAPATH(tmp); ++ state->config.build_cfg0.aes_max_key_size = ++ NIST_TRNG_REG_CFG0_AES_MAX_KEY_SIZE(tmp); ++ state->config.build_cfg0.personilzation_str = ++ NIST_TRNG_REG_CFG0_PERSONILIZATION_STR(tmp); ++ DEBUG("NIST_TRNG: BUILD_CFG0 core_type=%u, bg8=%u, cdc_synch_depth=%u, background_noise=%u\n", ++ state->config.build_cfg0.core_type, state->config.build_cfg0.bg8, ++ state->config.build_cfg0.cdc_synch_depth, ++ state->config.build_cfg0.background_noise); ++ DEBUG("edu_present=%u, aes_datapath=%u, aes_max_key_size=%u, personilzation_str=%u\n", ++ state->config.build_cfg0.edu_present, ++ state->config.build_cfg0.aes_datapath, ++ state->config.build_cfg0.aes_max_key_size, ++ state->config.build_cfg0.personilzation_str); ++ ++ tmp = pdu_io_read32(state->base + NIST_TRNG_REG_BUILD_CFG1); ++ DEBUG("NIST_TRNG: NIST_TRNG_REG_BUILD_CFG1=0x%x\n", tmp); ++ state->config.build_cfg1.num_raw_noise_blks = ++ NIST_TRNG_REG_CFG1_NUM_RAW_NOISE_BLKS(tmp); ++ state->config.build_cfg1.sticky_startup = ++ NIST_TRNG_REG_CFG1_STICKY_STARTUP(tmp); ++ state->config.build_cfg1.auto_correlation_test = ++ NIST_TRNG_REG_CFG1_AUTO_CORRELATION_TEST(tmp); ++ state->config.build_cfg1.mono_bit_test = ++ NIST_TRNG_REG_CFG1_MONO_BIT_TEST(tmp); ++ state->config.build_cfg1.run_test = NIST_TRNG_REG_CFG1_RUN_TEST(tmp); ++ state->config.build_cfg1.poker_test = ++ NIST_TRNG_REG_CFG1_POKER_TEST(tmp); ++ state->config.build_cfg1.raw_ht_adap_test = ++ NIST_TRNG_REG_CFG1_RAW_HT_ADAP_TEST(tmp); ++ state->config.build_cfg1.raw_ht_rep_test = ++ NIST_TRNG_REG_CFG1_RAW_HT_REP_TEST(tmp); ++ state->config.build_cfg1.ent_src_rep_smpl_size = ++ NIST_TRNG_REG_CFG1_ENT_SRC_REP_SMPL_SIZE(tmp); ++ state->config.build_cfg1.ent_src_rep_test = ++ NIST_TRNG_REG_CFG1_ENT_SRC_REP_TEST(tmp); ++ state->config.build_cfg1.ent_src_rep_min_entropy = ++ NIST_TRNG_REG_CFG1_ENT_SRC_REP_MIN_ENTROPY(tmp); ++ DEBUG("NIST_TRNG: BUILD_CFG1 num_raw_noise_blks=%u, sticky_startup=%u, auto_correlation_test=%u\n", ++ state->config.build_cfg1.num_raw_noise_blks, ++ 
state->config.build_cfg1.sticky_startup, ++ state->config.build_cfg1.auto_correlation_test); ++ DEBUG("mono_bit_test=%u, run_test=%u, poker_test=%u, raw_ht_adap_test=%u\n", ++ state->config.build_cfg1.mono_bit_test, ++ state->config.build_cfg1.run_test, ++ state->config.build_cfg1.poker_test, ++ state->config.build_cfg1.raw_ht_adap_test); ++ DEBUG("raw_ht_rep_test=%u, ent_src_rep_smpl_size=%u, ent_src_rep_test=%u, ent_src_rep_min_entropy=%u\n", ++ state->config.build_cfg1.raw_ht_rep_test, ++ state->config.build_cfg1.ent_src_rep_smpl_size, ++ state->config.build_cfg1.ent_src_rep_test, ++ state->config.build_cfg1.ent_src_rep_min_entropy); ++ ++ tmp = pdu_io_read32(state->base + NIST_TRNG_EDU_BUILD_CFG0); ++ state->config.edu_build_cfg0.rbc2_rate_width = ++ NIST_TRNG_REG_EDU_CFG0_RBC2_RATE_WIDTH(tmp); ++ state->config.edu_build_cfg0.rbc1_rate_width = ++ NIST_TRNG_REG_EDU_CFG0_RBC1_RATE_WIDTH(tmp); ++ state->config.edu_build_cfg0.rbc0_rate_width = ++ NIST_TRNG_REG_EDU_CFG0_RBC0_RATE_WIDTH(tmp); ++ state->config.edu_build_cfg0.public_vtrng_channels = ++ NIST_TRNG_REG_EDU_CFG0_PUBLIC_VTRNG_CHANNELS(tmp); ++ state->config.edu_build_cfg0.esm_channel = ++ NIST_TRNG_REG_EDU_CFG0_ESM_CHANNEL(tmp); ++ state->config.edu_build_cfg0.rbc_channels = ++ NIST_TRNG_REG_EDU_CFG0_RBC_CHANNELS(tmp); ++ state->config.edu_build_cfg0.fifo_depth = ++ NIST_TRNG_REG_EDU_CFG0_FIFO_DEPTH(tmp); ++ DEBUG("NIST_TRNG: EDU_BUILD_CFG0 rbc2_rate_width=%u, rbc1_rate_width=%u, rbc0_rate_width=%u\n", ++ state->config.edu_build_cfg0.rbc2_rate_width, ++ state->config.edu_build_cfg0.rbc1_rate_width, ++ state->config.edu_build_cfg0.rbc0_rate_width); ++ DEBUG("public_vtrng_channels=%u, esm_channel=%u, rbc_channels=%u, fifo_depth=%u\n", ++ state->config.edu_build_cfg0.public_vtrng_channels, ++ state->config.edu_build_cfg0.esm_channel, ++ state->config.edu_build_cfg0.rbc_channels, ++ state->config.edu_build_cfg0.fifo_depth); ++ ++ state->status.edu_vstat.seed_enum = ++ NIST_TRNG_REG_EDU_VSTAT_SEED_ENUM(tmp); ++ state->status.edu_vstat.rnc_enabled = ++ NIST_TRNG_REG_EDU_VSTAT_RNC_ENABLED(tmp); ++ ++ err = nisttrng_zeroize(state); ++ if (err) ++ goto ERR; ++ ++ err = CRYPTO_OK; ++ state->status.current_state = NIST_TRNG_STATE_INITIALIZE; ++ERR: ++ DEBUG("--- %s Return, err = %i\n", __func__, err); ++ return err; ++} /* nisttrng_init */ ++EXPORT_SYMBOL(nisttrng_init); ++ ++/* Instantiate the DRBG state */ ++int nisttrng_instantiate(struct nist_trng_state *state, int req_sec_strength, ++ int pred_resist, void *personal_str) ++{ ++ int err; ++ u32 tmp; ++ u32 zero_ps[12] = { 0 }; ++ int i = 0; ++ ++ DEBUG(">> %s: security strength = %u, pred_resist = %u, personilization string existence = %u\n", ++ __func__, req_sec_strength, pred_resist, (personal_str) ? 
1 : 0); ++ ++ /* make sure there is no alarm and the core is not busy */ ++ err = nisttrng_get_alarms(state); ++ if (err) ++ goto ERR; ++ ++ err = nisttrng_wait_on_busy(state); ++ if (err) ++ goto ERR; ++ ++ /* If DRBG is already instantiated or if current state does not allow an instantiate, return error */ ++ if (DRBG_INSTANTIATED(state->status.current_state)) { ++ DEBUG("Initial check: DRBG state is already instantiated\n"); ++ err = CRYPTO_FAILED; ++ goto ERR; ++ } ++ if (state->status.current_state != NIST_TRNG_STATE_INITIALIZE && ++ state->status.current_state != NIST_TRNG_STATE_UNINSTANTIATE) { ++ DEBUG("Cannot instantiate in the current state (%u)\n", ++ state->status.current_state); ++ err = CRYPTO_FAILED; ++ goto ERR; ++ } ++ ++ /* if hardware is not configured to accept extra personalization string, but personal_str is not NULL, return error */ ++ if (!state->config.features.extra_ps_present && personal_str) { ++ DEBUG("HW config does not allow extra PS\n"); ++ err = CRYPTO_FAILED; ++ goto ERR; ++ } ++ ++ /* Validate and set the security strength */ ++ err = nisttrng_set_sec_strength(state, req_sec_strength); ++ if (err) ++ goto ERR; ++ ++ /* get entropy - noise seeding. If the mode is nonce, get_entropy must be called by the user prior to the instantiate function */ ++ DEBUG("Seeding mode is: %s\n", ++ state->status.nonce_mode ? "Nonce" : "Noise"); ++ if (!state->status.nonce_mode) { /* noise seeding */ ++ err = nisttrng_get_entropy_input(state, NULL, 0); ++ if (err) ++ goto ERR; ++ } ++ ++ /* load the personilization string if hardware is configured to accept it */ ++ if (state->config.features.extra_ps_present) { ++ /* if HW is configured to accept personilizatoin string, it will use whatever is in the NPA_DATAx. So, if the string is NULL, just load 0. 
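The 384-bit word array handed to nisttrng_load_ps_addin() below is built by the sysfs front end shown earlier: the hex string is consumed eight characters at a time and the leading group lands in the highest-index word. A stand-alone sketch of that ordering, reusing one of the 96-character test vectors from test_attr_store(); parse_hex_words() is illustrative and strtoul() stands in for kstrtouint().

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* First 8 hex characters become the highest-index word, as in the store handlers. */
static int parse_hex_words(const char *s, uint32_t *out, int n_words)
{
	char word[9] = { 0 };
	int i;

	if (strlen(s) < (size_t)n_words * 8)
		return -1;

	for (i = 0; i < n_words; i++) {
		memcpy(word, s + i * 8, 8);
		out[n_words - 1 - i] = (uint32_t)strtoul(word, NULL, 16);
	}
	return 0;
}

int main(void)
{
	const char *ps = "d63fb5afa2101fa4b8a6c3b89d9c250ac728fc1ddad0e7585b5d54728ed20c2f940e89155596e3b963635b6d6088164b";
	uint32_t words[12];

	if (parse_hex_words(ps, words, 12))
		return 1;
	printf("words[11]=0x%08lx words[0]=0x%08lx\n",
	       (unsigned long)words[11], (unsigned long)words[0]);
	return 0;
}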
*/ ++ if (!personal_str) ++ personal_str = &zero_ps[0]; ++ ++ err = nisttrng_load_ps_addin(state, personal_str); ++ if (err) ++ goto ERR; ++ } ++ ++ /* initiate the Create_State command and wait on done */ ++ DEBUG("Create the DRBG state\n"); ++ ++ pdu_io_write32(state->base + NIST_TRNG_REG_CTRL, ++ NIST_TRNG_REG_CTRL_CMD_CREATE_STATE); ++ err = nisttrng_wait_on_done(state); ++ if (err) ++ goto ERR; ++ ++ /* check STAT register to make sure DRBG is instantiated */ ++ tmp = pdu_io_read32(state->base + NIST_TRNG_REG_STAT); ++ if (!NIST_TRNG_REG_STAT_GET_DRBG_STATE(tmp)) { ++ err = CRYPTO_FAILED; ++ goto ERR; ++ } ++ ++ /* reset reminder and alarms counters */ ++ nisttrng_reset_counters(state); ++ ++ //if EDU is available enable RNC and disable prediction resistance , disable all RBC,s ++ //state->config.build_cfg0.edu_present = 0; ++ if (state->config.build_cfg0.edu_present) { ++ //disable prediction resistance ++ err = nisttrng_set_pred_resist(state, 0); ++ if (err) ++ goto ERR; ++ ++ //enable RNC ++ nisttrng_rnc(state, NIST_TRNG_EDU_RNC_CTRL_CMD_RNC_ENABLE); ++ // disable all RBC,s ++ ++ tmp = pdu_io_read32(state->base + NIST_TRNG_EDU_RBC_CTRL); ++ for (i = 0; i < state->config.edu_build_cfg0.rbc_channels; ++ i++) { ++ err = nisttrng_rbc(state, 0, i, 0, ++ CHX_URUN_BLANK_AFTER_RESET); ++ if (err) ++ goto ERR; ++ } ++ tmp = pdu_io_read32(state->base + NIST_TRNG_EDU_RBC_CTRL); ++ ++ } else { ++ /* set the prediction resistance */ ++ err = nisttrng_set_pred_resist(state, pred_resist); ++ if (err) ++ goto ERR; ++ } ++ ++ err = CRYPTO_OK; ++ state->status.current_state = NIST_TRNG_STATE_INSTANTIATE; ++ERR: ++ DEBUG("--- Return %s, err = %i\n", __func__, err); ++ return err; ++} /* nisttrng_instantiate */ ++EXPORT_SYMBOL(nisttrng_instantiate); ++ ++/* Uninstantiate the DRBG state and zeroize */ ++int nisttrng_uninstantiate(struct nist_trng_state *state) ++{ ++ int err; ++ int err_tmp; ++ u32 tmp; ++ ++ DEBUG(">> %s: uninstantiate the DRBG and zeroize\n", __func__); ++ //printf(" nisttrng_uninstantiate: uninstantiate the DRBG and zeroize\n"); ++ err = CRYPTO_OK; ++ err_tmp = CRYPTO_OK; ++ ++ //disable RNC ++ if (state->config.build_cfg0.edu_present) { ++ if (state->status.edu_vstat.rnc_enabled) { ++ DEBUG("%s: disable RNC\n", __func__); ++ nisttrng_rnc(state, NIST_TRNG_EDU_RNC_CTRL_CMD_RNC_FINISH_TO_IDLE); ++ //always clear the busy bit after disabling RNC ++ pdu_io_write32(state->base + NIST_TRNG_REG_ISTAT, tmp); ++ } ++ } ++ ++ /* if DRBG is instantiated, return CRYPTO_NOT_INSTANTIATED, but still do the zeroize */ ++ if (!DRBG_INSTANTIATED(state->status.current_state)) ++ err_tmp = CRYPTO_NOT_INSTANTIATED; ++ ++ /* zeroize */ ++ err = nisttrng_zeroize(state); ++ if (err) ++ goto ERR; ++ ++ if (err == CRYPTO_OK && err_tmp == CRYPTO_NOT_INSTANTIATED) ++ err = CRYPTO_NOT_INSTANTIATED; ++ ++ state->status.current_state = NIST_TRNG_STATE_UNINSTANTIATE; ++ERR: ++ DEBUG("--- Return %s, err = %i\n", __func__, err); ++ return err; ++} /* nisttrng_uninstantiate */ ++EXPORT_SYMBOL(nisttrng_uninstantiate); ++ ++/* enable/disable specific rbc ++ * rbc_num = rbc channel num ++ * urun_blnk = underrun blanking duration for rbc channel ++ * rate = sets rate of serial entropy output for rbc channel ++ */ ++int nisttrng_rbc(struct nist_trng_state *state, int enable, int rbc_num, int rate, ++ int urun_blnk) ++{ ++ int err = 0; ++ u32 tmp_rbc = 0; ++ ++ tmp_rbc = pdu_io_read32(state->base + NIST_TRNG_EDU_RBC_CTRL); ++ ++ if (enable) { ++ if (rate > 15) { ++ DEBUG("Incorrect rate = %d\n", rate); ++ err = 
CRYPTO_FAILED; ++ goto ERR; ++ } ++ if (urun_blnk > 3) { ++ DEBUG("Incorrect urun_blnk = %d\n", urun_blnk); ++ err = CRYPTO_FAILED; ++ goto ERR; ++ } ++ } else { //disable ++ rate = NISTTRNG_EDU_RBC_CTRL_GET_CH_RATE_AFTER_RESET; ++ urun_blnk = NISTTRNG_EDU_RBC_CTRL_SET_CH_URUN_BLANK_AFTER_RESET; ++ } ++ ++ switch (rbc_num) { ++ case 0: ++ tmp_rbc = NISTTRNG_EDU_RBC_CTRL_SET_CH_RATE(rate, tmp_rbc, _NIST_TRNG_EDU_RBC_CTRL_CH0_RATE); ++ tmp_rbc = NISTTRNG_EDU_RBC_CTRL_SET_CH_URUN_BLANK(urun_blnk, tmp_rbc, ++ _NIST_TRNG_EDU_RBC_CTRL_CH0_URUN_BLANK); ++ ++ break; ++ case 1: ++ tmp_rbc = NISTTRNG_EDU_RBC_CTRL_SET_CH_RATE(rate, tmp_rbc, _NIST_TRNG_EDU_RBC_CTRL_CH1_RATE); ++ tmp_rbc = NISTTRNG_EDU_RBC_CTRL_SET_CH_URUN_BLANK(urun_blnk, tmp_rbc, ++ _NIST_TRNG_EDU_RBC_CTRL_CH1_URUN_BLANK); ++ ++ break; ++ case 2: ++ tmp_rbc = NISTTRNG_EDU_RBC_CTRL_SET_CH_RATE(rate, tmp_rbc, _NIST_TRNG_EDU_RBC_CTRL_CH2_RATE); ++ tmp_rbc = NISTTRNG_EDU_RBC_CTRL_SET_CH_URUN_BLANK(urun_blnk, tmp_rbc, ++ _NIST_TRNG_EDU_RBC_CTRL_CH2_URUN_BLANK); ++ break; ++ default: ++ DEBUG("Incorrect rbc_num = %d\n", rbc_num); ++ err = CRYPTO_FAILED; ++ goto ERR; ++ } ++ ++ pdu_io_write32(state->base + NIST_TRNG_EDU_RBC_CTRL, tmp_rbc); ++ ++ERR: ++ DEBUG("--- Return %s, err = %i\n", __func__, err); ++ return err; ++} ++ ++/* Reseed */ ++int nisttrng_reseed(struct nist_trng_state *state, int pred_resist, void *addin_str) ++{ ++ int rnc_flag = 0; ++ int err; ++ ++ DEBUG(">> %s: pred_resist = %u, additional strign existence = %u\n", ++ __func__, pred_resist, (addin_str) ? 1 : 0); ++ ++ if (state->config.build_cfg0.edu_present) { ++ if (state->status.edu_vstat.rnc_enabled) { ++ // disable_rnc ++ err = nisttrng_rnc(state, NIST_TRNG_EDU_RNC_CTRL_CMD_RNC_DISABLE_TO_HOLD); ++ if (err) ++ goto ERR; ++ ++ rnc_flag = 1; ++ } ++ } ++ ++ /* make sure there is no alarm and the core is not busy */ ++ err = nisttrng_get_alarms(state); ++ if (err) ++ goto ERR; ++ ++ err = nisttrng_wait_on_busy(state); ++ if (err) ++ goto ERR; ++ ++ /* if the DRBG is not instantiated return error */ ++ if (!DRBG_INSTANTIATED(state->status.current_state)) { ++ DEBUG("DRBG is not instantiated\n"); ++ err = CRYPTO_FAILED; ++ goto ERR; ++ } ++ ++ /* if pred_resist is set but, pred_resist that the DRBG is instantiated with is not 1, return error */ ++ err = nisttrng_set_pred_resist(state, pred_resist); ++ if (err) ++ goto ERR; ++ ++ /* get entropy - noise seeding. 
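For the nisttrng_rbc() helper a few lines up: each RBC channel carries a 4-bit rate (0..15) and a 2-bit underrun-blanking value (0..3) inside NIST_TRNG_EDU_RBC_CTRL, updated read-modify-write. A stand-alone sketch of that style of packing follows; the shift positions and field spacing here are placeholders, not the real register layout (that layout lives in nisttrng_hw.h, which is not part of this hunk).

#include <stdint.h>
#include <stdio.h>

#define CH_FIELD_SHIFT(ch)	((ch) * 8)	/* placeholder spacing: one byte per channel */
#define CH_RATE_MASK		0xFu		/* rate: 4 bits, 0..15 */
#define CH_URUN_MASK		0x3u		/* underrun blanking: 2 bits, 0..3 */

static int rbc_pack(uint32_t *ctrl, int ch, unsigned int rate, unsigned int urun)
{
	unsigned int shift = CH_FIELD_SHIFT(ch);

	if (ch < 0 || ch > 2 || rate > 15 || urun > 3)
		return -1;			/* same range checks as the driver */

	*ctrl &= ~((CH_RATE_MASK | (CH_URUN_MASK << 4)) << shift);
	*ctrl |= (rate | (urun << 4)) << shift;
	return 0;
}

int main(void)
{
	uint32_t ctrl = 0;

	rbc_pack(&ctrl, 1, 2, 1);		/* channel 1, rate 2, blanking 1 */
	printf("ctrl=0x%08lx\n", (unsigned long)ctrl);
	return 0;
}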
If the mode is nonce, get_entropy must be called by the user prior to the instantiate function */ ++ if (!state->status.nonce_mode) { /* noise seeding */ ++ err = nisttrng_get_entropy_input(state, NULL, 0); ++ if (err) ++ goto ERR; ++ } ++ ++ /* if addin_str is not NULL, it means that the additionl input is available and has to be loaded */ ++ if (addin_str) { ++ /* set the ADDIN_PRESENT field of the MODE register to 1 */ ++ err = nisttrng_set_addin_present(state, 1); ++ if (err) ++ goto ERR; ++ ++ /* load the additional input */ ++ err = nisttrng_load_ps_addin(state, addin_str); ++ if (err) ++ goto ERR; ++ ++ } else { ++ /* set the ADDIN_PRESENT field of the MODE register to 0 */ ++ err = nisttrng_set_addin_present(state, 0); ++ if (err) ++ goto ERR; ++ } ++ ++ /* initiate the reseed and wait on done */ ++ pdu_io_write32(state->base + NIST_TRNG_REG_CTRL, ++ NIST_TRNG_REG_CTRL_CMD_RENEW_STATE); ++ err = nisttrng_wait_on_done(state); ++ if (err) ++ goto ERR; ++ ++ /* reset reminder and alarms counters */ ++ nisttrng_reset_counters(state); ++ ++ if (rnc_flag) { ++ // rnc_enable ++ err = nisttrng_rnc(state, NIST_TRNG_EDU_RNC_CTRL_CMD_RNC_ENABLE); ++ if (err) ++ goto ERR; ++ } ++ ++ err = CRYPTO_OK; ++ state->status.current_state = NIST_TRNG_STATE_RESEED; ++ERR: ++ DEBUG("--- Return %s, err = %i\n", __func__, err); ++ return err; ++} /* nisttrng_reseed */ ++EXPORT_SYMBOL(nisttrng_reseed); ++ ++static int nisttrng_vtrng_wait_on_busy(struct nist_trng_state *state, int priv, int vtrng) ++{ ++ u32 tmp, t; ++ ++ t = NIST_TRNG_RETRY_MAX; ++ ++ if (priv) { //private vtrng ++ do { ++ tmp = pdu_io_read32(state->base + NIST_TRNG_EDU_VSTAT); ++ } while (NIST_TRNG_REG_EDU_VSTAT_BUSY(tmp) && --t); ++ ++ } else { //public vtrng ++ do { ++ tmp = pdu_io_read32(state->base + ++ NIST_TRNG_EDU_VTRNG_VSTAT0 + ++ 8 * vtrng); ++ } while (NIST_TRNG_REG_EDU_VSTAT_BUSY(tmp) && --t); ++ } ++ ++ if (t) ++ return CRYPTO_OK; ++ ++ SYNHW_PRINT("wait_on_: failed timeout: %08lx\n", ++ (unsigned long)tmp); ++ ++ return CRYPTO_TIMEOUT; ++} /* nisttrng_vtrng_wait_on_busy */ ++ ++int nisttrng_generate_public_vtrng(struct nist_trng_state *state, void *random_bits, ++ unsigned long req_num_bytes, int vtrng) ++{ ++ int err = 0; ++ u32 tmp; ++ unsigned int remained_bytes; ++ unsigned long req_num_blks; ++ int i, j; ++ ++ DEBUG(">> %s : requested number of bytes = %lu, vtrng num = %u\n", ++ __func__, req_num_bytes, vtrng); ++ ++ /* make sure random_bits is not NULL */ ++ if (!random_bits) { ++ DEBUG("random_bits pointer cannot be NULL\n"); ++ err = CRYPTO_FAILED; ++ goto ERR; ++ } ++ ++ if (vtrng > state->config.edu_build_cfg0.public_vtrng_channels) { ++ DEBUG("vtrng channel invalid (%u)\n", vtrng); ++ err = CRYPTO_FAILED; ++ goto ERR; ++ } ++ ++ if (state->status.edu_vstat.rnc_enabled == 0) { ++ DEBUG("rnc_disabled\n"); ++ } ++ ++ if (state->status.edu_vstat.seed_enum == 0) { ++ DEBUG("not seed_enum\n"); ++ } ++ ++ /* loop on generate to get the requested number of bits. Each generate gives NIST_TRNG_RAND_BLK_SIZE_BITS bits. */ ++ req_num_blks = ((req_num_bytes * 8) % NIST_TRNG_RAND_BLK_SIZE_BITS) ? 
++ (((req_num_bytes * 8) / NIST_TRNG_RAND_BLK_SIZE_BITS) + 1) : ++ ((req_num_bytes * 8) / NIST_TRNG_RAND_BLK_SIZE_BITS); ++ ++ for (i = 0; i < req_num_blks; i++) { ++ tmp = pdu_io_read32(state->base + NIST_TRNG_EDU_VTRNG_VCTRL0 + ++ (vtrng * 8)); ++ tmp = NIST_TRNG_EDU_VTRNG_VCTRL_CMD_SET(tmp, NIST_TRNG_EDU_VTRNG_VCTRL_CMD_GET_RANDOM); ++ pdu_io_write32(state->base + NIST_TRNG_EDU_VTRNG_VCTRL0 + (vtrng * 8), ++ tmp); ++ ++ // check busy ++ err = nisttrng_vtrng_wait_on_busy(state, 0, vtrng); ++ if (err) ++ goto ERR; ++ ++ // check for error ++ tmp = pdu_io_read32(state->base + NIST_TRNG_EDU_VTRNG_VISTAT0 + ++ (vtrng * 8)); ++ if (NIST_TRNG_REG_EDU_VSTAT_ANY_RW1(tmp)) { ++ DEBUG("EDU_VSTAT_ANY_RW1 set 0x%x\n", tmp); ++ } ++ ++ // check that all valid ++ tmp = pdu_io_read32(state->base + NIST_TRNG_EDU_VTRNG_VSTAT0 + ++ 8 * vtrng); ++ if ((NIST_TRNG_REG_EDU_VSTAT_SLICE_VLD0(tmp) == 0) || ++ (NIST_TRNG_REG_EDU_VSTAT_SLICE_VLD1(tmp) == 0) || ++ (NIST_TRNG_REG_EDU_VSTAT_SLICE_VLD2(tmp) == 0) || ++ (NIST_TRNG_REG_EDU_VSTAT_SLICE_VLD3(tmp) == 0)) { ++ DEBUG("EDU_VSTAT_SLICE_VLD fail 0x%x\n", tmp); ++ } ++ ++ /* read the generated random number block and store */ ++ for (j = 0; j < (NIST_TRNG_RAND_BLK_SIZE_BITS / 32); j++) { ++ tmp = pdu_io_read32(state->base + ++ NIST_TRNG_EDU_VTRNG_VRAND0_0 + ++ (vtrng * 8) + j); ++ /* copy to random_bits byte-by-byte, until req_num_bytes are copied */ ++ remained_bytes = req_num_bytes - ++ (i * (NIST_TRNG_RAND_BLK_SIZE_BITS / 8) + ++ j * 4); ++ if (remained_bytes > 4) { ++ memcpy(random_bits + i * (NIST_TRNG_RAND_BLK_SIZE_BITS / 8) + ++ j * 4, &tmp, 4); ++ ++ /* decrement the bits counter and return error if generated more than the maximum*/ ++ state->counters.bits_per_req_left = ++ state->counters.bits_per_req_left - ++ 4 * 8; ++ if (state->counters.bits_per_req_left < 0) { ++ err = CRYPTO_FAILED; ++ goto ERR; ++ } ++ } else { ++ memcpy(random_bits + i * (NIST_TRNG_RAND_BLK_SIZE_BITS / 8) + ++ j * 4, &tmp, remained_bytes); ++ ++ /* decrement the bits counter and return error if generated more than the maximum*/ ++ state->counters.bits_per_req_left = ++ state->counters.bits_per_req_left - ++ remained_bytes * 8; ++ if (state->counters.bits_per_req_left < 0) { ++ err = CRYPTO_FAILED; ++ goto ERR; ++ } ++ break; ++ } ++ } ++ } ++ ++ err = CRYPTO_OK; ++ state->status.current_state = NIST_TRNG_STATE_GENERATE; ++ERR: ++ if (err) ++ random_bits = NULL; ++ ++ DEBUG("--- Return %s, err = %i\n", __func__, err); ++ return err; ++} ++ ++static int nisttrng_generate_private_vtrng(struct nist_trng_state *state, void *random_bits, ++ unsigned long req_num_bytes) ++{ ++ int err; ++ u32 tmp; ++ unsigned int remained_bytes; ++ unsigned long req_num_blks; ++ int i, j; ++ ++ DEBUG(">> %s : requested number of bytes = %lu ", ++ __func__, req_num_bytes); ++ ++ /* requested number of bits has to be less that the programmed maximum */ ++ if ((req_num_bytes * 8) > state->counters.max_bits_per_req) { ++ SYNHW_PRINT("requested number of bits (%lu) is larger than the set maximum (%lu)\n", ++ (req_num_bytes * 8), state->counters.max_bits_per_req); ++ err = CRYPTO_FAILED; ++ goto ERR; ++ } ++ ++ /* make sure random_bits is not NULL */ ++ if (!random_bits) { ++ SYNHW_PRINT("random_bits pointer cannot be NULL\n"); ++ err = CRYPTO_FAILED; ++ goto ERR; ++ } ++ ++ if (state->status.edu_vstat.rnc_enabled == 0) { ++ DEBUG("rnc_disabled\n"); ++ } ++ ++ if (state->status.edu_vstat.seed_enum == 0) { ++ DEBUG("not seed_enum\n"); ++ } ++ ++ /* loop on generate to get the requested number of bits. 
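The block arithmetic in this loop (and in the public-channel variant above) rounds the request up to whole hardware blocks and then copies the tail byte-by-byte. A stand-alone sketch of the rounding; the 128-bit block size is an assumption consistent with the four 32-bit reads per block in these loops, and the request sizes echo the SDK examples in test_attr_store() above.

#include <stdio.h>

#define BLK_BITS 128UL		/* assumed NIST_TRNG_RAND_BLK_SIZE_BITS */

static unsigned long blocks_needed(unsigned long req_bytes)
{
	unsigned long bits = req_bytes * 8;

	return (bits + BLK_BITS - 1) / BLK_BITS;	/* same result as the %-then-+1 form */
}

int main(void)
{
	printf("10 bytes  -> %lu block(s)\n", blocks_needed(10));	/* 1 */
	printf("41 bytes  -> %lu block(s)\n", blocks_needed(41));	/* 3 */
	printf("512 bytes -> %lu block(s)\n", blocks_needed(512));	/* 32 */
	return 0;
}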
Each generate gives NIST_TRNG_RAND_BLK_SIZE_BITS bits. */ ++ req_num_blks = ((req_num_bytes * 8) % NIST_TRNG_RAND_BLK_SIZE_BITS) ? ++ (((req_num_bytes * 8) / NIST_TRNG_RAND_BLK_SIZE_BITS) + 1) : ++ ((req_num_bytes * 8) / NIST_TRNG_RAND_BLK_SIZE_BITS); ++ ++ for (i = 0; i < req_num_blks; i++) { ++ tmp = pdu_io_read32(state->base + NIST_TRNG_EDU_VCTRL); ++ tmp = NIST_TRNG_EDU_VTRNG_VCTRL_CMD_SET(tmp, NIST_TRNG_EDU_VTRNG_VCTRL_CMD_GET_RANDOM); ++ pdu_io_write32(state->base + NIST_TRNG_EDU_VCTRL, tmp); ++ ++ // check busy ++ err = nisttrng_vtrng_wait_on_busy(state, 1, 0); ++ if (err) ++ goto ERR; ++ ++ // check for error ++ tmp = pdu_io_read32(state->base + NIST_TRNG_EDU_VISTAT); ++ if (NIST_TRNG_REG_EDU_VSTAT_ANY_RW1(tmp)) { ++ DEBUG("EDU_VSTAT_ANY_RW1 set 0x%x\n", tmp); ++ } ++ ++ //check that all valid ++ tmp = pdu_io_read32(state->base + NIST_TRNG_EDU_VSTAT); ++ if ((NIST_TRNG_REG_EDU_VSTAT_SLICE_VLD0(tmp) == 0) || ++ (NIST_TRNG_REG_EDU_VSTAT_SLICE_VLD1(tmp) == 0) || ++ (NIST_TRNG_REG_EDU_VSTAT_SLICE_VLD2(tmp) == 0) || ++ (NIST_TRNG_REG_EDU_VSTAT_SLICE_VLD3(tmp) == 0)) { ++ DEBUG("EDU_VSTAT_SLICE_VLD fail 0x%x\n", tmp); ++ } ++ ++ /* read the generated random number block and store */ ++ for (j = 0; j < (NIST_TRNG_RAND_BLK_SIZE_BITS / 32); j++) { ++ tmp = pdu_io_read32(state->base + ++ NIST_TRNG_EDU_VRAND_0 + j); ++ /* copy to random_bits byte-by-byte, until req_num_bytes are copied */ ++ remained_bytes = req_num_bytes - ++ (i * (NIST_TRNG_RAND_BLK_SIZE_BITS / 8) + ++ j * 4); ++ if (remained_bytes > 4) { ++ memcpy(random_bits + i * (NIST_TRNG_RAND_BLK_SIZE_BITS / 8) + ++ j * 4, &tmp, 4); ++ ++ /* decrement the bits counter and return error if generated more than the maximum*/ ++ state->counters.bits_per_req_left = ++ state->counters.bits_per_req_left - ++ 4 * 8; ++ if (state->counters.bits_per_req_left < 0) { ++ err = CRYPTO_FAILED; ++ goto ERR; ++ } ++ } else { ++ memcpy(random_bits + i * (NIST_TRNG_RAND_BLK_SIZE_BITS / 8) + ++ j * 4, &tmp, remained_bytes); ++ ++ /* decrement the bits counter and return error if generated more than the maximum*/ ++ state->counters.bits_per_req_left = ++ state->counters.bits_per_req_left - ++ remained_bytes * 8; ++ if (state->counters.bits_per_req_left < 0) { ++ err = CRYPTO_FAILED; ++ goto ERR; ++ } ++ break; ++ } ++ } ++ } ++ERR: ++ DEBUG("--- Return %s, err = %i\n", __func__, err); ++ return err; ++} ++ ++/* Generate */ ++int nisttrng_generate(struct nist_trng_state *state, void *random_bits, ++ unsigned long req_num_bytes, int req_sec_strength, ++ int pred_resist, void *addin_str) ++{ ++ int err; ++ int reseed_required; ++ ++ DEBUG(">> %s: requested number of bytes = %lu, security strength = %u, pred_resist = %u, additional string existence = %u\n", ++ __func__, req_num_bytes, req_sec_strength, pred_resist, ++ (addin_str) ? 
1 : 0); ++ ++ /* make sure there is no alarm and the core is not busy */ ++ err = nisttrng_get_alarms(state); ++ if (err) ++ goto ERR; ++ ++ err = nisttrng_wait_on_busy(state); ++ if (err) ++ goto ERR; ++ ++ /* if the DRBG is not instantiated return error */ ++ if (!DRBG_INSTANTIATED(state->status.current_state)) { ++ SYNHW_PRINT("DRBG is not instantiated\n"); ++ err = CRYPTO_FAILED; ++ goto ERR; ++ } ++ ++ /* requested number of bits has to be less that the programmed maximum */ ++ if ((req_num_bytes * 8) > state->counters.max_bits_per_req) { ++ SYNHW_PRINT("requested number of bits (%lu) is larger than the set maximum (%lu)\n", ++ (req_num_bytes * 8), state->counters.max_bits_per_req); ++ err = CRYPTO_FAILED; ++ goto ERR; ++ } ++ ++ /* security strength has to be lower than what the DRBG is instantiated with. set_sec_strength function checks this. */ ++ err = nisttrng_set_sec_strength(state, req_sec_strength); ++ if (err) ++ goto ERR; ++ ++ /* set the prediction resistance - if pred_resist is set but, pred_resist that the DRBG is instantiated with is not 1, return error */ ++ err = nisttrng_set_pred_resist(state, pred_resist); ++ if (err) ++ goto ERR; ++ ++ /* make sure random_bits is not NULL */ ++ if (!random_bits) { ++ DEBUG("random_bits pointer cannot be NULL\n"); ++ err = CRYPTO_FAILED; ++ goto ERR; ++ } ++ ++ /* set the reseed required flag to 0. The loop is to check at the end whether a reseed is required at the end and jump back to reseed and generate if needed. This is the NIST mandated procedure */ ++ reseed_required = 0; ++ ++ if (!addin_str) { ++ /* set the ADDIN_PRESENT field of the MODE register to 1 */ ++ err = nisttrng_set_addin_present(state, 0); ++ if (err) ++ goto ERR; ++ } ++ ++ do { ++ void *generate_addin_str = addin_str; ++ ++ if (pred_resist | reseed_required) { ++ err = nisttrng_reseed(state, pred_resist, addin_str); ++ if (err) ++ goto ERR; ++ ++ /* SP800-90a says that if reseed is executed, any additional input string is only used in the reseed phase and replaced by NULL in the generate phase */ ++ generate_addin_str = NULL; ++ err = nisttrng_set_addin_present(state, 0); ++ if (err) ++ goto ERR; ++ ++ /* ADDIN_PRESENT field in MODE has to be set back to 0 to avoid illegal cmd sequence */ ++ reseed_required = 0; ++ } ++ ++ /* generate process */ ++ if (nisttrng_check_seed_lifetime(state) == CRYPTO_RESEED_REQUIRED) { ++ reseed_required = 1; ++ ++ } else { ++ reseed_required = 0; ++ ++ /* Refresh_Addin command if additional input is not NULL*/ ++ if (generate_addin_str) { ++ err = nisttrng_refresh_addin(state, generate_addin_str); ++ if (err) ++ goto ERR; ++ } ++ ++ /* Generate all random bits */ ++ /* if EDU present then get random number from private vtrng */ ++ ++ //state->config.build_cfg0.edu_present = 0; ++ if (state->config.build_cfg0.edu_present) { ++ err = nisttrng_generate_private_vtrng(state, random_bits, ++ req_num_bytes); ++ if (err) ++ goto ERR; ++ ++ } else { ++ err = nisttrng_gen_random(state, random_bits, ++ req_num_bytes); ++ if (err) ++ goto ERR; ++ ++ /* Advance the state - if it returns CRYPTO_RESEED_REQUIRED, have to jump back and do a reseed and generate */ ++ err = nisttrng_advance_state(state); ++ if (err) ++ goto ERR; ++ } ++ } ++ } while (reseed_required); ++ ++ err = CRYPTO_OK; ++ state->status.current_state = NIST_TRNG_STATE_GENERATE; ++ERR: ++ if (err) ++ random_bits = NULL; ++ ++ DEBUG("--- Return %s, err = %i\n", __func__, err); ++ return err; ++} /* nisttrng_generate */ ++EXPORT_SYMBOL(nisttrng_generate); +diff --git 
a/drivers/char/hw_random/dwc/src/trng/trng/nist_trng_private.c b/drivers/char/hw_random/dwc/src/trng/trng/nist_trng_private.c +--- a/drivers/char/hw_random/dwc/src/trng/trng/nist_trng_private.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/char/hw_random/dwc/src/trng/trng/nist_trng_private.c 2025-12-23 10:16:19.525059469 +0000 +@@ -0,0 +1,1022 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * This Synopsys software and associated documentation (hereinafter the ++ * "Software") is an unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. The ++ * Software IS NOT an item of Licensed Software or a Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Products ++ * with Synopsys or any supplement thereto. Synopsys is a registered trademark ++ * of Synopsys, Inc. Other names included in the SOFTWARE may be the ++ * trademarks of their respective owners. ++ * ++ * The contents of this file are dual-licensed; you may select either version ++ * 2 of the GNU General Public License ("GPL") or the BSD-3-Clause license ++ * ("BSD-3-Clause"). The GPL is included in the COPYING file accompanying the ++ * SOFTWARE. The BSD License is copied below. ++ * ++ * BSD-3-Clause License: ++ * Copyright (c) 2012-2016 Synopsys, Inc. and/or its affiliates. ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions are met: ++ * ++ * 1. Redistributions of source code must retain the above copyright notice, ++ * this list of conditions, and the following disclaimer, without ++ * modification. ++ * ++ * 2. Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * ++ * 3. The names of the above-listed copyright holders may not be used to ++ * endorse or promote products derived from this software without specific ++ * prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE ++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR ++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS ++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN ++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ++ * POSSIBILITY OF SUCH DAMAGE. 
++ */ ++ ++#include "nisttrng_hw.h" ++#include "nisttrng.h" ++ ++/* Wait functions */ ++static int nisttrng_wait_on_(struct nist_trng_state *state, u32 mask) ++{ ++ u32 tmp; ++ int t; ++ ++ t = NIST_TRNG_RETRY_MAX; ++ ++ do { ++ tmp = pdu_io_read32(state->base + NIST_TRNG_REG_ISTAT); ++ } while (!(tmp & (mask | NIST_TRNG_REG_ISTAT_ALARMS)) && --t); ++ ++ if (tmp & NIST_TRNG_REG_ISTAT_ALARMS) ++ return nisttrng_get_alarms(state); ++ ++ if (t) { ++ pdu_io_write32(state->base + NIST_TRNG_REG_ISTAT, mask); ++ return CRYPTO_OK; ++ ++ } else { ++ SYNHW_PRINT("wait_on_: failed timeout: %08lx\n", ++ (unsigned long)tmp); ++ return CRYPTO_TIMEOUT; ++ } ++} /* nisttrng_wait_on_ */ ++ ++int nisttrng_wait_on_done(struct nist_trng_state *state) ++{ ++ return nisttrng_wait_on_(state, NIST_TRNG_REG_ISTAT_DONE); ++} /* nisttrng_wait_on_done */ ++EXPORT_SYMBOL(nisttrng_wait_on_done); ++ ++int nisttrng_wait_on_noise_rdy(struct nist_trng_state *state) ++{ ++ return nisttrng_wait_on_(state, NIST_TRNG_REG_ISTAT_NOISE_RDY); ++} /* nisttrng_wait_on_noise_rdy */ ++ ++static int nisttrng_wait_on_zeroize(struct nist_trng_state *state) ++{ ++ return nisttrng_wait_on_(state, NIST_TRNG_REG_ISTAT_ZEROIZE); ++} /* nisttrng_wait_on_zeroize */ ++ ++static int nisttrng_wait_on_kat_completed(struct nist_trng_state *state) ++{ ++ return nisttrng_wait_on_(state, NIST_TRNG_REG_ISTAT_KAT_COMPLETE); ++} /* nisttrng_wait_on_kat_completed */ ++ ++int nisttrng_wait_on_busy(struct nist_trng_state *state) ++{ ++ u32 tmp, t; ++ ++ t = NIST_TRNG_RETRY_MAX; ++ ++ do { ++ tmp = pdu_io_read32(state->base + NIST_TRNG_REG_STAT); ++ } while ((tmp & NIST_TRNG_REG_STAT_BUSY) && --t); ++ ++ if (t) ++ return CRYPTO_OK; ++ ++ SYNHW_PRINT("wait_on_busy: failed timeout: %08lx\n", ++ (unsigned long)tmp); ++ return CRYPTO_TIMEOUT; ++} /* nisttrng_wait_on_busy */ ++EXPORT_SYMBOL(nisttrng_wait_on_busy); ++ ++/* Read and return alarm. 
Zeroize if there is an alarm*/ ++int nisttrng_get_alarms(struct nist_trng_state *state) ++{ ++ u32 tmp; ++ ++ tmp = pdu_io_read32(state->base + NIST_TRNG_REG_ISTAT); ++ if (tmp & NIST_TRNG_REG_ISTAT_ALARMS) { ++ // alarm happened ++ tmp = pdu_io_read32(state->base + NIST_TRNG_REG_ALARM); ++ DEBUG("Received alarm: %lx\n", (unsigned long)tmp); ++ // clear istat ++ pdu_io_write32(state->base + NIST_TRNG_REG_ISTAT, ++ NIST_TRNG_REG_ISTAT_ALARMS); ++ pdu_io_write32(state->base + NIST_TRNG_REG_ALARM, 0x1F); ++ state->status.alarm_code = tmp & 0x1F; ++ ++ /* zeroize if there was an alarm */ ++ if (state->status.alarm_code != ++ NIST_TRNG_REG_ALARM_FAILED_TEST_ID_OK) { ++ nisttrng_zeroize(state); ++ } ++ } else { ++ state->status.alarm_code = 0; ++ } ++ ++ if (state->status.alarm_code) ++ return CRYPTO_FATAL; ++ else ++ return CRYPTO_OK; ++} /* nisttrng_get_alarms */ ++EXPORT_SYMBOL(nisttrng_get_alarms); ++ ++/* Reset reminder and alarm counters */ ++int nisttrng_reset_counters(struct nist_trng_state *state) ++{ ++ state->counters.bits_per_req_left = state->counters.max_bits_per_req; ++ state->counters.req_per_seed_left = state->counters.max_req_per_seed; ++ ++ return 0; ++} /* nisttrng_reset_counters */ ++EXPORT_SYMBOL(nisttrng_reset_counters); ++ ++/* When a zeroize happens some of the struct objects should reset */ ++int nisttrng_reset_state(struct nist_trng_state *state) ++{ ++ nisttrng_reset_counters(state); ++ state->status.pad_ps_addin = 0; ++ state->status.current_state = NIST_TRNG_STATE_UNINSTANTIATE; ++ ++ return 0; ++} /* nisttrng_reset_state */ ++ ++/* ---------- Set field APIs ---------- */ ++ ++/* ++ * Sets the security strength of the DRBG instance. ++ * > req_sec_strength has to be an integer. The API chooses one of SEC_STRNT_AES128 or SEC_STRNT_AES256 as follows: ++ * 0 < req_sec_strength <= 128 --> security strength = SEC_STRNT_AES128 ++ * 128 < req_sec_strength <= 256 --> security strength = SEC_STRNT_AES256 ++ * else --> Invalid security strength ++ * > If the DRBG is instantiated, a new security strength change request with greater security strength will return error. ++ */ ++int nisttrng_set_sec_strength(struct nist_trng_state *state, int req_sec_strength) ++{ ++ int err; ++ u32 tmp; ++ enum nisttrng_sec_strength chosen_sec_strength; ++ ++ DEBUG(">> %s: security strength = %i\n", __func__, ++ req_sec_strength); ++ ++ /* choose the security strength */ ++ /* set the security strength to the lowest security strength greater or equal to the req_sec_strenght from the set {128, 256} */ ++ if (REQ_SEC_STRENGTH_IS_VALID(req_sec_strength)) { ++ if (req_sec_strength > 0 && req_sec_strength <= 128) { ++ chosen_sec_strength = SEC_STRNT_AES128; ++ ++ } else if (((req_sec_strength > 128) && ++ (req_sec_strength <= 256)) && ++ (state->config.features.drbg_arch == AES256)) { ++ chosen_sec_strength = SEC_STRNT_AES256; ++ ++ } else { /* should not get here, because we have already checked the validity */ ++ DEBUG("Invalid security strength\n"); ++ err = CRYPTO_FAILED; ++ goto ERR; ++ } ++ } else { ++ DEBUG("Invalid security strength\n"); ++ err = CRYPTO_FAILED; ++ goto ERR; ++ } ++ DEBUG("chosen security strength = %u\n", chosen_sec_strength); ++ ++ /* set the security strenght - at this point security strength is validated and converted */ ++ if (DRBG_INSTANTIATED(state->status.current_state) && ++ chosen_sec_strength != state->status.sec_strength) { ++ /* security strength can only change when the DRBG is not instantiated. 
*/
++		/* if the new security strength is less than what the DRBG is instantiated with, accept it, but don't change in HW. If it's more, return error */
++		if (chosen_sec_strength < state->status.sec_strength) {
++			DEBUG("Lowering the security strength. DRBG is already instantiated.\n");
++			state->status.pad_ps_addin = 4;
++			state->status.sec_strength = chosen_sec_strength;
++
++		} else {
++			state->status.pad_ps_addin = 0;
++			DEBUG("Cannot select a higher security strength once the DRBG is instantiated\n");
++			err = CRYPTO_FAILED;
++			goto ERR;
++		}
++	} else {
++		DEBUG("Updating the security strength.\n");
++		tmp = pdu_io_read32(state->base + NIST_TRNG_REG_MODE);
++		tmp = NIST_TRNG_REG_MODE_SET_SEC_ALG(tmp, chosen_sec_strength);
++		pdu_io_write32(state->base + NIST_TRNG_REG_MODE, tmp);
++
++		state->status.pad_ps_addin = 0;
++		state->status.sec_strength = chosen_sec_strength;
++	}
++
++	err = CRYPTO_OK;
++ERR:
++	DEBUG("--- Return %s, err = %i\n", __func__, err);
++	return err;
++} /* nisttrng_set_sec_strength */
++EXPORT_SYMBOL(nisttrng_set_sec_strength);
++
++/*
++ * Sets the ADDIN_PRESENT field of the MODE register according to the addin_present input.
++ */
++int nisttrng_set_addin_present(struct nist_trng_state *state, int addin_present)
++{
++	u32 tmp;
++
++	DEBUG(">> %s, addin_present = %u\n", __func__,
++	      addin_present);
++
++	tmp = pdu_io_read32(state->base + NIST_TRNG_REG_MODE);
++	tmp = NIST_TRNG_REG_MODE_SET_ADDIN_PRESENT(tmp, addin_present);
++	pdu_io_write32(state->base + NIST_TRNG_REG_MODE, tmp);
++
++	DEBUG("--- Return %s, err = %i\n", __func__, 0);
++	return 0;
++} /* nisttrng_set_addin_present */
++EXPORT_SYMBOL(nisttrng_set_addin_present);
++
++/*
++ * Sets the PRED_RESIST field of the MODE register according to the pred_resist input.
++ * > If the DRBG is instantiated with prediction resistance of 0, and a change to the prediction resistance of 1 is requested,
++ * the API will return an error.
++ */
++int nisttrng_set_pred_resist(struct nist_trng_state *state, int pred_resist)
++{
++	int err;
++	u32 tmp;
++
++	DEBUG(">> %s: pred_resist = %u\n", __func__, pred_resist);
++
++	/* if DRBG is instantiated, prediction resistance can only change from 1 to 0 and not vice versa. This is a NIST requirement. */
++	if (DRBG_INSTANTIATED(state->status.current_state) && pred_resist &&
++	    !state->status.pred_resist) {
++		err = CRYPTO_FAILED;
++		goto ERR;
++	}
++
++	tmp = pdu_io_read32(state->base + NIST_TRNG_REG_MODE);
++	tmp = NIST_TRNG_REG_MODE_SET_PRED_RESIST(tmp, pred_resist);
++	pdu_io_write32(state->base + NIST_TRNG_REG_MODE, tmp);
++
++	state->status.pred_resist = pred_resist;
++
++	err = CRYPTO_OK;
++ERR:
++	DEBUG("--- Return %s, err = %i\n", __func__, err);
++	return err;
++} /* nisttrng_set_pred_resist */
++EXPORT_SYMBOL(nisttrng_set_pred_resist);
++
++/*
++ * Puts the NIST_TRNG in either the SECURE or PROMISCUOUS mode.
++ * > A value of 1 for secure_mode puts the core in the SECURE mode and a value of 0 puts it in the PROMISCUOUS mode.
++ * > Any change to the secure mode of the NIST_TRNG will result in a complete zeroize, and will set the seeding mode to self-seeding.
++ * A zeroize will not destroy the programmed mode and ALARM register value.
++ * It keeps the programmed mode to avoid re-programming.
++ * It also maintains the ALARM register value, so that the user can read the value to understand the reason of the occurred alarm.
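++ *
++ * Illustrative call sequence (a documentation sketch only, not code used by
++ * the driver; it assumes the caller already holds an initialised "state"
++ * handle):
++ *
++ *	err = nisttrng_set_secure_mode(state, 1);	// request SECURE mode
++ *	if (err)
++ *		return err;
++ *	// if the mode actually changed, the core has been zeroized, so the
++ *	// DRBG must be re-seeded and re-instantiated before further use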
++ */ ++int nisttrng_set_secure_mode(struct nist_trng_state *state, int secure_mode) ++{ ++ int err; ++ u32 tmp; ++ int t; ++ ++ DEBUG(">> %s: secure_mode = %u\n", __func__, secure_mode); ++ ++ t = NIST_TRNG_RETRY_MAX; ++ ++ tmp = pdu_io_read32(state->base + NIST_TRNG_REG_SMODE); ++ tmp = NIST_TRNG_REG_SMODE_SET_SECURE_EN(tmp, secure_mode); ++ pdu_io_write32(state->base + NIST_TRNG_REG_SMODE, tmp); ++ ++ /* wait until STAT register indicates that the mode is applied */ ++ do { ++ tmp = pdu_io_read32(state->base + NIST_TRNG_REG_STAT); ++ } while ((NIST_TRNG_REG_STAT_GET_SECURE(tmp) != secure_mode) && --t); ++ ++ if (!t) { ++ err = CRYPTO_TIMEOUT; ++ goto ERR; ++ } ++ ++ /* if secure mode changes, a zeroize will happen in HW. */ ++ if (state->status.secure_mode != secure_mode) { ++ DEBUG("secure mode changed. zeroize happened. reset sw state\n"); ++ /* nonce mode goes back to default. */ ++ state->status.nonce_mode = 0; ++ /* reset the SW state */ ++ nisttrng_reset_state(state); ++ } ++ ++ state->status.secure_mode = secure_mode; ++ ++ err = CRYPTO_OK; ++ERR: ++ DEBUG("--- Return %s, err = %i\n", __func__, err); ++ return err; ++} /* nisttrng_set_secure_mode */ ++EXPORT_SYMBOL(nisttrng_set_secure_mode); ++ ++/* ++ * To change the seeding mode of the NIST_TRNG. ++ * > A value of 1 for nonce_mode will put the NIST_TRNG in the nonce seeding mode, which means that the seed will be provided by the user, ++ * unlike the noise or self-seeding mode (normal mode of operation) in which the seed is generated by the internal entropy source. ++ * > Any transition to or from the nonce mode will zeroize the NIST_TRNG. ++ */ ++int nisttrng_set_nonce_mode(struct nist_trng_state *state, int nonce_mode) ++{ ++ int err; ++ u32 tmp; ++ int t; ++ ++ DEBUG(">> %s: nonce_mode = %u\n", __func__, nonce_mode); ++ ++ t = NIST_TRNG_RETRY_MAX; ++ ++ tmp = pdu_io_read32(state->base + NIST_TRNG_REG_SMODE); ++ tmp = NIST_TRNG_REG_SMODE_SET_NONCE(tmp, nonce_mode); ++ pdu_io_write32(state->base + NIST_TRNG_REG_SMODE, tmp); ++ ++ /* wait until STAT register indicates that the mode is applied */ ++ do { ++ tmp = pdu_io_read32(state->base + NIST_TRNG_REG_STAT); ++ } while ((NIST_TRNG_REG_STAT_GET_NONCE(tmp) != nonce_mode) && --t); ++ ++ if (!t) { ++ err = CRYPTO_TIMEOUT; ++ goto ERR; ++ } ++ ++ /* if nonce mode changes, a zeroize will happen in HW. */ ++ if (state->status.nonce_mode != nonce_mode) { ++ DEBUG("nonce mode changed. zeroize happened. reset sw state\n"); ++ /* reset the SW state */ ++ nisttrng_reset_state(state); ++ } ++ ++ state->status.nonce_mode = nonce_mode; ++ ++ err = CRYPTO_OK; ++ERR: ++ DEBUG("--- Return %s, err = %i\n", __func__, err); ++ return err; ++} /* nisttrng_set_nonce_mode */ ++EXPORT_SYMBOL(nisttrng_set_nonce_mode); ++ ++/* ---------- Load data APIs ---------- */ ++/* ++ * Loads the additional input or personalization string into the NPA_DATAx registers. ++ * > Loads the proper number of bits (256 or 384) according to the security strength stored in the state handle. 
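++ *
++ * Illustrative use (a documentation sketch only, not driver code; "ps" is a
++ * caller-owned buffer sized here for the larger case, 384 bits for
++ * SEC_STRNT_AES256 versus 256 bits for SEC_STRNT_AES128):
++ *
++ *	u32 ps[384 / 32];	// personalization string filled in by the caller
++ *
++ *	err = nisttrng_load_ps_addin(state, ps);
++ *	if (err)
++ *		return err;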
++ */ ++int nisttrng_load_ps_addin(struct nist_trng_state *state, void *input_str) ++{ ++ int err; ++ int i, j; ++ int str_size; ++ ++ DEBUG(">> %s starts...\n", __func__); ++ ++ /* return error if the pointer is NULL */ ++ if (!input_str) { ++ err = CRYPTO_FAILED; ++ goto ERR; ++ } ++ ++ /* calculate the length based on the security strength */ ++ if (state->status.sec_strength == SEC_STRNT_AES128) ++ str_size = 8; /* 256/32 */ ++ else if (state->status.sec_strength == SEC_STRNT_AES256) ++ str_size = 12; /* 384/32 */ ++ ++ for (i = 0; i < str_size; i++) { ++ pdu_io_write32(state->base + NIST_TRNG_REG_NPA_DATA0 + i, ++ ((u32 *)input_str)[i]); ++ } ++ ++ j = str_size + state->status.pad_ps_addin; ++ /* if security strength is lowered after the DRBG is instantiated, pad PS and ADDIN with 0 at the MSB side */ ++ DEBUG("pad NPA_DATA with %u zeros at the MSB side\n", ++ state->status.pad_ps_addin); ++ for (i = str_size; i < j; i++) ++ pdu_io_write32(state->base + NIST_TRNG_REG_NPA_DATA0 + i, 0); ++ ++ err = CRYPTO_OK; ++ERR: ++ DEBUG("--- Return %s, err = %i\n", __func__, err); ++ return err; ++} /* nisttrng_load_ps_addin */ ++EXPORT_SYMBOL(nisttrng_load_ps_addin); ++ ++/* ---------- Command APIs ---------- */ ++/* ++ * Provides entropy and is used in both nonce and noise (self) seeding modes of operation: ++ * > If the NIST_TRNG is in the nonce mode, entropy must be provided by the user; otherwise (in the self-seeding mode) entropy will be generated by the internal entropy source of the NIST_TRNG. ++ * > In the noise mode, calling the API will initiate a seeding command. Depending on the programmed security strength, a 256 or 384-bit seed will be generated. ++ * > Inputs 2 and 3 are only used when the core is in the nonce mode. ++ * > In the nonce mode, the NIST_TRNG can be seeded either through 2 or 3 blocks of 512-bit nonce values which are passed to the internal derivation function to increase the entropy, ++ * or it can be seeded by a 256 or 384-bit nonce written directly into the SEEDx registers. ++ * Passing a value of 1 to nonce_operation selects the former scenario and a value of 0 selects the latter. ++ * > The input_nonce pointer must point to a memory location with a sufficient number of initialized bits. ++ * > Table below shows the required number of bits depending on the nonce_operation and the security strength values. ++ * nonce_operation | Security Strength | Bit length requirement ++ * ------------------------------------------------------------------------------------------ ++ * 1 (using the Derivation Function) | SEC_STRNT_AES128 | 2x512 = 1024 ++ * 1 (using the Derivation Function) | SEC_STRNT_AES256 | 3x512 = 1536 ++ * 0 (loading the seed into SEEDx) | SEC_STRNT_AES128 | 256 ++ * 0 (loading the seed into SEEDx) | SEC_STRNT_AES256 | 384 ++ * > Generated entropy is secret information held securely within the HW and remains inaccessible to the user, unless the HW core is in the PROMISCUOUS mode. ++ */ ++int nisttrng_get_entropy_input(struct nist_trng_state *state, void *input_nonce, ++ int nonce_operation) ++{ ++ int err; ++ int nonce_ld_cntr = 0; ++ int i, j; ++ ++ DEBUG(">> %s: seeding mode = %s, nonce_operation = %u\n", __func__, ++ (state->status.nonce_mode ? 
"Nonce" : "Noise"), nonce_operation); ++ ++ /* make sure there is no alarm and the core is not busy */ ++ err = nisttrng_get_alarms(state); ++ if (err) ++ goto ERR; ++ ++ err = nisttrng_wait_on_busy(state); ++ if (err) ++ goto ERR; ++ ++ /* --- Seeding --- */ ++ if (state->status.nonce_mode) { /* --- nonce mode --- */ ++ if (!input_nonce) { ++ err = CRYPTO_FAILED; ++ goto ERR; ++ } ++ ++ nonce_ld_cntr = 0; ++ ++ if (state->status.sec_strength == SEC_STRNT_AES128) ++ nonce_ld_cntr = 2; ++ else if (state->status.sec_strength == SEC_STRNT_AES256) ++ nonce_ld_cntr = 3; ++ ++ if (nonce_operation) { /* load the noise inside NPA_DATAx register and issue gen_nonce command */ ++ for (i = 0; i < nonce_ld_cntr; i++) { ++ /* load the nonoce */ ++ for (j = 0; j < 16; j++) { ++ pdu_io_write32(state->base + ++ NIST_TRNG_REG_NPA_DATA0 + j, ++ ((u32 *)input_nonce)[16 * i + j]); ++ } ++ ++ /* issue the command and wait on done */ ++ pdu_io_write32(state->base + NIST_TRNG_REG_CTRL, ++ NIST_TRNG_REG_CTRL_CMD_GEN_NONCE); ++ ++ if (nisttrng_wait_on_done(state)) { ++ err = CRYPTO_FATAL; ++ goto ERR; ++ }; ++ } ++ ++ } else { ++ /* load the nonoce */ ++ for (i = 0; i < 4 * nonce_ld_cntr; i++) { ++ pdu_io_write32(state->base + NIST_TRNG_REG_SEED0 + i, ++ ((u32 *)input_nonce)[i]); ++ } ++ } ++ } else { /* --- noise mode --- */ ++ /* issue the command and wait on done */ ++ DEBUG("issue the Gen_Noise command\n"); ++ pdu_io_write32(state->base + NIST_TRNG_REG_CTRL, ++ NIST_TRNG_REG_CTRL_CMD_GEN_NOISE); ++ ++ if (nisttrng_wait_on_done(state)) { ++ err = CRYPTO_FATAL; ++ goto ERR; ++ }; ++ } ++ ++ err = CRYPTO_OK; ++ERR: ++ DEBUG("--- Return %s, err = %i\n", __func__, err); ++ return err; ++} /* nisttrng_get_entropy_input */ ++EXPORT_SYMBOL(nisttrng_get_entropy_input); ++ ++/* ++ * Generate Function: ++ * > The Generate function in NIST_TRNG HW is broken down into 3 steps: Refresh_Addin, Gen_Random and Advance_State. ++ * nisttrng_generate incorporates all these steps and some extra checks into one simple API. ++ * > There is one API for each step, below || ++ * \/ ++ */ ++/* ++ * Generate Part 1 - Refresh_Addin: Additional input string is used to add to the HW state entropy. ++ * > This API calls nisttrng_set_addin_present to set the ADDIN_PRESENT field of the MODE register to 1. ++ * > Then it loads the additional input provided by addin_str pointer into the NPA_DATAx by calling the nisttrng_load_ps_addin. ++ * > Then, it issues a Refresh_Addin command to the HW. ++ * > If the addin_str pointer is NULL, the API will return error. 
++ */ ++int nisttrng_refresh_addin(struct nist_trng_state *state, void *addin_str) ++{ ++ int err; ++ ++ DEBUG(">> %s starts...\n", __func__); ++ ++ /* if the DRBG is not intantiated return error */ ++ if (!DRBG_INSTANTIATED(state->status.current_state)) { ++ err = CRYPTO_FAILED; ++ goto ERR; ++ } ++ ++ /* make sure there is no alarm and the core is not busy */ ++ err = nisttrng_get_alarms(state); ++ if (err) ++ goto ERR; ++ ++ err = nisttrng_wait_on_busy(state); ++ if (err) ++ goto ERR; ++ ++ /* This API should not be called with a NULL additional input string */ ++ if (!addin_str) { ++ err = CRYPTO_FAILED; ++ goto ERR; ++ } ++ ++ /* set the ADDIN_PRESENT field of the MODE register to 1 */ ++ err = nisttrng_set_addin_present(state, 1); ++ if (err) ++ goto ERR; ++ ++ err = nisttrng_load_ps_addin(state, addin_str); ++ if (err) ++ goto ERR; ++ ++ /* execute the command and wait on done*/ ++ pdu_io_write32(state->base + NIST_TRNG_REG_CTRL, ++ NIST_TRNG_REG_CTRL_CMD_REFRESH_ADDIN); ++ ++ err = nisttrng_wait_on_done(state); ++ if (err) ++ goto ERR; ++ ++ err = 0; ++ERR: ++ DEBUG("--- Return %s, err = %i\n", __func__, err); ++ return err; ++} /* nisttrng_refresh_addin */ ++EXPORT_SYMBOL(nisttrng_refresh_addin); ++ ++/* ++ * Generate Part 2 - Gen_Random: generates the requested number of bits. ++ * > This API issues the Gen_Random command to the HW as many times as indicated by req_num_bytes to generate the requested number of bits. ++ * > If the requested number of bits (i.e. 128×req_num_blks) is more than the maximum value specified by max_bits_per_req, the API will return error. ++ * > Random bits will be returned in random_bits. ++ */ ++int nisttrng_gen_random(struct nist_trng_state *state, void *random_bits, ++ unsigned long req_num_bytes) ++{ ++ int err; ++ int i, j; ++ u32 tmp; ++ unsigned int remained_bytes; ++ unsigned long req_num_blks; ++ ++ DEBUG(">> %s: req_num_bytes = %lu\n", __func__, req_num_bytes); ++ ++ /* if the DRBG is not intantiated return error */ ++ if (!DRBG_INSTANTIATED(state->status.current_state)) { ++ err = CRYPTO_FAILED; ++ goto ERR; ++ } ++ ++ /* make sure there is no alarm and the core is not busy */ ++ err = nisttrng_get_alarms(state); ++ if (err) ++ goto ERR; ++ ++ err = nisttrng_wait_on_busy(state); ++ if (err) ++ goto ERR; ++ ++ /* requested number of bits has to be less that the programmed maximum */ ++ if ((req_num_bytes * 8) > state->counters.max_bits_per_req) { ++ DEBUG("requested number of bits (%lu) is larger than the set maximum (%lu)\n", ++ (req_num_bytes * 8), state->counters.max_bits_per_req); ++ err = CRYPTO_FAILED; ++ goto ERR; ++ } ++ ++ /* make sure random_bits is not NULL */ ++ if (!random_bits) { ++ DEBUG("random_bits pointer cannot be NULL\n"); ++ err = CRYPTO_FAILED; ++ goto ERR; ++ } ++ ++ /* loop on generate to get the requested number of bits. Each generate gives NIST_TRNG_RAND_BLK_SIZE_BITS bits. */ ++ req_num_blks = ++ ((req_num_bytes * 8) % NIST_TRNG_RAND_BLK_SIZE_BITS) ? 
++ (((req_num_bytes * 8) / NIST_TRNG_RAND_BLK_SIZE_BITS) + ++ 1) : ++ ((req_num_bytes * 8) / NIST_TRNG_RAND_BLK_SIZE_BITS); ++ ++ for (i = 0; i < req_num_blks; i++) { ++ /* issue gen_random and wait on done */ ++ pdu_io_write32(state->base + NIST_TRNG_REG_CTRL, ++ NIST_TRNG_REG_CTRL_CMD_GEN_RANDOM); ++ ++ err = nisttrng_wait_on_done(state); ++ if (err) ++ goto ERR; ++ ++ /* read the generated random number block and store */ ++ for (j = 0; j < (NIST_TRNG_RAND_BLK_SIZE_BITS / 32); j++) { ++ tmp = pdu_io_read32(state->base + NIST_TRNG_REG_RAND0 + j); ++ /* copy to random_bits byte-by-byte, until req_num_bytes are copied */ ++ remained_bytes = req_num_bytes - ++ (i * (NIST_TRNG_RAND_BLK_SIZE_BITS / 8) + ++ j * 4); ++ if (remained_bytes > 4) { ++ memcpy(random_bits + ++ i * (NIST_TRNG_RAND_BLK_SIZE_BITS / 8) + ++ j * 4, &tmp, 4); ++ ++ /* decrement the bits counter and return error if generated more than the maximum*/ ++ state->counters.bits_per_req_left = ++ state->counters.bits_per_req_left - ++ 4 * 8; ++ ++ if (state->counters.bits_per_req_left < 0) { ++ err = CRYPTO_FAILED; ++ goto ERR; ++ } ++ ++ } else { ++ memcpy(random_bits + i * (NIST_TRNG_RAND_BLK_SIZE_BITS / 8) + ++ j * 4, &tmp, remained_bytes); ++ ++ /* decrement the bits counter and return error if generated more than the maximum*/ ++ state->counters.bits_per_req_left = ++ state->counters.bits_per_req_left - ++ remained_bytes * 8; ++ ++ if (state->counters.bits_per_req_left < 0) { ++ err = CRYPTO_FAILED; ++ goto ERR; ++ } ++ break; ++ } ++ } ++ } ++ ++ err = CRYPTO_OK; ++ERR: ++ DEBUG("--- Return %s, err = %i\n", __func__, err); ++ return err; ++} /* nisttrng_gen_random */ ++EXPORT_SYMBOL(nisttrng_gen_random); ++ ++/* ++ * Generate Part 3 - Advance the state: advances the state of the DRBG. ++ * > This API issues the Advance_State command to the HW. ++ * > Then it updates the counter for the number of generate requests per seed. ++ * > The counter must be checked every time before starting the Generate process and a reseed must be issued if the limit is reached. This check is incorporated inside nisttrng_generate API. 
++ * > Note that we don't have to provide additional input again for this API, because if it had been provided in refresh_addin stage, HW will lock the NPA_DATAx, so it will be still available ++ */ ++int nisttrng_advance_state(struct nist_trng_state *state) ++{ ++ int err; ++ ++ DEBUG(">> %s starts...\n", __func__); ++ ++ /* if the DRBG is not intantiated return error */ ++ if (!DRBG_INSTANTIATED(state->status.current_state)) { ++ err = CRYPTO_FAILED; ++ goto ERR; ++ } ++ ++ /* make sure there is no alarm and the core is not busy */ ++ err = nisttrng_get_alarms(state); ++ if (err) ++ goto ERR; ++ ++ err = nisttrng_wait_on_busy(state); ++ if (err) ++ goto ERR; ++ ++ /* issue advance_state and wait on done */ ++ pdu_io_write32(state->base + NIST_TRNG_REG_CTRL, ++ NIST_TRNG_REG_CTRL_CMD_ADVANCE_STATE); ++ err = nisttrng_wait_on_done(state); ++ if (err) ++ goto ERR; ++ ++ /* generate is finished, reset the bits_per_req_left counter */ ++ state->counters.bits_per_req_left = state->counters.max_bits_per_req; ++ ++ --state->counters.req_per_seed_left; ++ if (state->counters.req_per_seed_left < 0) { ++ err = CRYPTO_FAILED; ++ goto ERR; ++ } /* just a check */ ++ ++ err = CRYPTO_OK; ++ERR: ++ DEBUG("--- Return %s, err = %i\n", __func__, err); ++ return err; ++} /* nisttrng_advance_state */ ++ ++int nisttrng_check_seed_lifetime(struct nist_trng_state *state) ++{ ++ int err; ++ ++ if (state->counters.req_per_seed_left <= 0) { ++ DEBUG("maximum number of requests per seed is reached\n"); ++ err = CRYPTO_RESEED_REQUIRED; ++ goto ERR; ++ } ++ ++ err = CRYPTO_OK; ++ERR: ++ return err; ++} ++EXPORT_SYMBOL(nisttrng_advance_state); ++ ++/* ++ * Perform Known Answer Test ++ * > The NIST_TRNG can perform a KAT on the DRBG and the derivation function inside the entropy source. There are also two different vectors available to do the KAT. ++ * > The kat_sel input selects whether the KAT should be performed on the DRBG or the derivation function. ++ * > The kat_vec input chooses the KAT vector. ++ * > Selections are done by writing the values to the MODE register. ++ * > If the KAT fails, the API returns error. ++ */ ++int nisttrng_kat(struct nist_trng_state *state, int kat_sel, int kat_vec) ++{ ++ int err; ++ u32 tmp; ++ ++ DEBUG(">> %s: kat_sel = %u, kat_vec = %u\n", __func__, ++ kat_sel, kat_vec); ++ ++ /* set KAT_SEL and KAT_VEC */ ++ tmp = pdu_io_read32(state->base + NIST_TRNG_REG_MODE); ++ tmp = NIST_TRNG_REG_MODE_SET_KAT_SEL(tmp, kat_sel); ++ tmp = NIST_TRNG_REG_MODE_SET_KAT_VEC(tmp, kat_vec); ++ pdu_io_write32(state->base + NIST_TRNG_REG_MODE, tmp); ++ ++ /* issue the command and wait on kat_completed */ ++ pdu_io_write32(state->base + NIST_TRNG_REG_CTRL, ++ NIST_TRNG_REG_CTRL_CMD_KAT); ++ ++ err = nisttrng_wait_on_kat_completed(state); ++ if (err) ++ goto ERR; ++ ++ /* check for alarms */ ++ err = nisttrng_get_alarms(state); ++ if (err) ++ goto ERR; ++ ++ err = CRYPTO_OK; ++ERR: ++ DEBUG("--- Return %s, err = %i\n", __func__, err); ++ return err; ++} /* nisttrng_kat */ ++EXPORT_SYMBOL(nisttrng_kat); ++ ++/* ++ * Performs a full KAT with all four combinations of the kat_sel and kat_vec ++ * > If any of the KAT fails, the API returns error. 
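++ *
++ * Illustrative self-test call (a documentation sketch only, not driver code):
++ *
++ *	err = nisttrng_full_kat(state);	// covers KAT_SEL 0/1 x KAT_VEC 0/1
++ *	if (err)
++ *		return err;		// failures surface through the alarm check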
++ */ ++int nisttrng_full_kat(struct nist_trng_state *state) ++{ ++ int err; ++ ++ DEBUG(">> %s starts...\n", __func__); ++ ++ /* SEL = 0, Vec = 0 */ ++ err = nisttrng_kat(state, 0, 0); ++ if (err) ++ goto ERR; ++ ++ /* SEL = 0, Vec = 1 */ ++ err = nisttrng_kat(state, 0, 1); ++ if (err) ++ goto ERR; ++ ++ /* SEL = 1, Vec = 0 */ ++ err = nisttrng_kat(state, 1, 0); ++ if (err) ++ goto ERR; ++ ++ /* SEL = 1, Vec = 1 */ ++ err = nisttrng_kat(state, 1, 1); ++ if (err) ++ goto ERR; ++ ++ err = CRYPTO_OK; ++ERR: ++ DEBUG("--- Return %s, err = %i\n", __func__, err); ++ return err; ++} /* nisttrng_full_kat */ ++EXPORT_SYMBOL(nisttrng_full_kat); ++ ++/* ++ * max_bits_per_req reminder initialized by nisttrng_init can change using this API. ++ * > If this API is called when the DRBG is instantiated, an error will be returned. ++ * > If the requested maximum is more than the standard's limit (determinded by NIST_TRNG_DFLT_MAX_BITS_PER_REQ), the API will return an error. ++ */ ++int nisttrng_set_reminder_max_bits_per_req(struct nist_trng_state *state, ++ unsigned long max_bits_per_req) ++{ ++ int err; ++ ++ DEBUG(">> %s: %lu\n", __func__, max_bits_per_req); ++ ++ /* if the DRBG is instantiated, cannot change the value */ ++ if (DRBG_INSTANTIATED(state->status.current_state)) { ++ DEBUG("cannot change the reminder value when DRBG is already instantiated\n"); ++ err = CRYPTO_FAILED; ++ goto ERR; ++ } ++ ++ /* requested value cannot be more than NIST's limit */ ++ if (max_bits_per_req > NIST_DFLT_MAX_BITS_PER_REQ) { ++ DEBUG("requested max_bits_per_req is more than standard's limit\n"); ++ err = CRYPTO_INVALID_ARGUMENT; ++ goto ERR; ++ } ++ ++ state->counters.max_bits_per_req = max_bits_per_req; ++ state->counters.bits_per_req_left = max_bits_per_req; ++ ++ err = CRYPTO_OK; ++ERR: ++ DEBUG("--- Return %s, err = %i\n", __func__, err); ++ return err; ++} ++EXPORT_SYMBOL(nisttrng_set_reminder_max_bits_per_req); ++ ++/* ++ * max_req_per_seed reminder initialized by nisttrng_init can change using this API. ++ * > If this API is called when the DRBG is instantiated, an error will be returned. ++ * > If the requested maximum is more than the standard's limit (determinded by NIST_TRNG_DFLT_MAX_REQ_PER_SEED), the API will return an error. ++ */ ++int nisttrng_set_reminder_max_req_per_seed(struct nist_trng_state *state, ++ unsigned long long max_req_per_seed) ++{ ++ int err; ++ ++ DEBUG(">> %s: %llu\n", __func__, max_req_per_seed); ++ ++ /* if the DRBG is instantiated, cannot change the value */ ++ if (DRBG_INSTANTIATED(state->status.current_state)) { ++ DEBUG("cannot change the reminder value when DRBG is already instantiated\n"); ++ err = CRYPTO_FAILED; ++ goto ERR; ++ } ++ ++ /* requested value cannot be more than NIST's limit */ ++ if (max_req_per_seed > NIST_DFLT_MAX_REQ_PER_SEED) { ++ DEBUG("requested max_req_per_seed is more than standard's limit\n"); ++ err = CRYPTO_INVALID_ARGUMENT; ++ goto ERR; ++ } ++ ++ state->counters.max_req_per_seed = max_req_per_seed; ++ state->counters.req_per_seed_left = max_req_per_seed; ++ ++ err = CRYPTO_OK; ++ERR: ++ DEBUG("--- Return %s, err = %i\n", __func__, err); ++ return err; ++} ++EXPORT_SYMBOL(nisttrng_set_reminder_max_req_per_seed); ++ ++/* ++ * Zeroize command ++ * > A zeroize will not destroy the programmed mode and ALARM register value. ++ * It keeps the programmed mode to avoid re-programming. ++ * It also, maintains the ALARM register value, so that the user can read the value to understand the reason of the occurred alarm. 
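++ *
++ * Illustrative use (a documentation sketch only, not driver code; typically
++ * issued when tearing the DRBG down or after handling a fatal alarm):
++ *
++ *	err = nisttrng_zeroize(state);
++ *	if (err)
++ *		return err;
++ *	// on success, state->status.current_state is NIST_TRNG_STATE_UNINSTANTIATE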
++ */ ++int nisttrng_zeroize(struct nist_trng_state *state) ++{ ++ int err; ++ ++ DEBUG(">> %s: zeroize the core\n", __func__); ++ ++ /* issue zeroize command */ ++ pdu_io_write32(state->base + NIST_TRNG_REG_CTRL, ++ NIST_TRNG_REG_CTRL_CMD_ZEROIZE); ++ ++ /* wait on zeroize done */ ++ err = nisttrng_wait_on_zeroize(state); ++ if (err) ++ goto ERR; ++ ++ /* reset the SW state */ ++ nisttrng_reset_state(state); ++ ++ err = CRYPTO_OK; ++ state->status.current_state = NIST_TRNG_STATE_UNINSTANTIATE; ++ERR: ++ DEBUG("--- Return %s, err = %i\n", __func__, err); ++ return err; ++} /* nisttrng_zeroize */ ++EXPORT_SYMBOL(nisttrng_zeroize); ++ ++int nisttrng_rnc(struct nist_trng_state *state, int rnc_ctrl_cmd) ++{ ++ int err = 0; ++ u32 tmp; ++ ++ DEBUG(">> %s cmd %d\n", __func__, rnc_ctrl_cmd); ++ ++ if (rnc_ctrl_cmd > NIST_TRNG_EDU_RNC_CTRL_CMD_RNC_FINISH_TO_IDLE) { ++ DEBUG(">> Invalid cmd %d\n", rnc_ctrl_cmd); ++ err = -1; ++ goto ERR; ++ } ++ ++ if (!state->config.build_cfg0.edu_present) { ++ DEBUG(">> edu not present\n"); ++ err = -1; ++ goto ERR; ++ } ++ ++ pdu_io_write32(state->base + NIST_TRNG_EDU_RNC_CTRL, rnc_ctrl_cmd); ++ if (rnc_ctrl_cmd == NIST_TRNG_EDU_RNC_CTRL_CMD_RNC_ENABLE) { ++ // wait till rnc is enabled ++ do { ++ tmp = pdu_io_read32(state->base + NIST_TRNG_EDU_STAT); ++ } while (!NIST_TRNG_EDU_STAT_RNC_ENABLED(tmp)); ++ ++ state->status.edu_vstat.rnc_enabled = 1; ++ ++ } else { ++ // wait till rnc is idle (disabled) ++ do { ++ tmp = pdu_io_read32(state->base + NIST_TRNG_EDU_STAT); ++ } while (NIST_TRNG_EDU_STAT_RNC_ENABLED(tmp)); ++ ++ state->status.edu_vstat.rnc_enabled = 0; ++ } ++ERR: ++ DEBUG("--- Return %s, err = %i\n", __func__, err); ++ return err; ++} ++EXPORT_SYMBOL(nisttrng_rnc); ++ ++int nisttrng_wait_fifo_full(struct nist_trng_state *state) ++{ ++ int err = 0; ++ u32 tmp, t; ++ ++ t = NIST_TRNG_RETRY_MAX; ++ ++ DEBUG(">> %s starts...\n", __func__); ++ ++ do { ++ tmp = pdu_io_read32(state->base + NIST_TRNG_EDU_STAT); ++ } while ((!NIST_TRNG_EDU_STAT_FIFO_FULL(tmp)) && --t); ++ ++ if (t) { ++ err = CRYPTO_OK; ++ } else { ++ DEBUG("wait_on_fifo_full: failed timeout: %08lx\n", ++ (unsigned long)tmp); ++ err = CRYPTO_TIMEOUT; ++ goto ERR; ++ } ++ ++ERR: ++ DEBUG("--- Return %s, err = %i\n", __func__, err); ++ return err; ++} +diff --git a/drivers/char/ipmi/kcs_bmc_aspeed.c b/drivers/char/ipmi/kcs_bmc_aspeed.c +--- a/drivers/char/ipmi/kcs_bmc_aspeed.c 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/char/ipmi/kcs_bmc_aspeed.c 2025-12-23 10:16:21.105032988 +0000 +@@ -1,13 +1,14 @@ + // SPDX-License-Identifier: GPL-2.0 + /* + * Copyright (c) 2015-2018, Intel Corporation. ++ * Copyright (c) 2023, Aspeed Technology Inc. 
+ */ +- + #define pr_fmt(fmt) "aspeed-kcs-bmc: " fmt + + #include + #include + #include ++#include + #include + #include + #include +@@ -23,10 +24,9 @@ + + #include "kcs_bmc_device.h" + ++#define DEVICE_NAME "aspeed-kcs-bmc" + +-#define DEVICE_NAME "ast-kcs-bmc" +- +-#define KCS_CHANNEL_MAX 4 ++static DEFINE_IDA(aspeed_kcs_bmc_ida); + + /* + * Field class descriptions +@@ -38,87 +38,74 @@ + * SELyIRQX SerIRQ polarity for LPC channel y (low: 0, high: 1) + * IRQXEy Assert the SerIRQ specified in IDyIRQX for LPC channel y + */ +- +-#define LPC_TYIRQX_LOW 0b00 +-#define LPC_TYIRQX_HIGH 0b01 +-#define LPC_TYIRQX_RSVD 0b10 +-#define LPC_TYIRQX_RISING 0b11 +- +-#define LPC_HICR0 0x000 +-#define LPC_HICR0_LPC3E BIT(7) +-#define LPC_HICR0_LPC2E BIT(6) +-#define LPC_HICR0_LPC1E BIT(5) +-#define LPC_HICR2 0x008 +-#define LPC_HICR2_IBFIE3 BIT(3) +-#define LPC_HICR2_IBFIE2 BIT(2) +-#define LPC_HICR2_IBFIE1 BIT(1) +-#define LPC_HICR4 0x010 +-#define LPC_HICR4_LADR12AS BIT(7) +-#define LPC_HICR4_KCSENBL BIT(2) +-#define LPC_SIRQCR0 0x070 ++#define HICR0 0x000 ++#define HICR0_LPC3E BIT(7) ++#define HICR0_LPC2E BIT(6) ++#define HICR0_LPC1E BIT(5) ++#define HICR2 0x008 ++#define HICR2_IBFIE3 BIT(3) ++#define HICR2_IBFIE2 BIT(2) ++#define HICR2_IBFIE1 BIT(1) ++#define HICR4 0x010 ++#define HICR4_LADR12AS BIT(7) ++#define HICR4_KCSENBL BIT(2) ++#define LADR3H 0x014 ++#define LADR3L 0x018 ++#define LADR12H 0x01C ++#define LADR12L 0x020 ++#define IDR1 0x024 ++#define IDR2 0x028 ++#define IDR3 0x02C ++#define ODR1 0x030 ++#define ODR2 0x034 ++#define ODR3 0x038 ++#define STR1 0x03C ++#define STR2 0x040 ++#define STR3 0x044 ++#define SIRQCR0 0x070 + /* IRQ{12,1}E1 are deprecated as of AST2600 A3 but necessary for prior chips */ +-#define LPC_SIRQCR0_IRQ12E1 BIT(1) +-#define LPC_SIRQCR0_IRQ1E1 BIT(0) +-#define LPC_HICR5 0x080 +-#define LPC_HICR5_ID3IRQX_MASK GENMASK(23, 20) +-#define LPC_HICR5_ID3IRQX_SHIFT 20 +-#define LPC_HICR5_ID2IRQX_MASK GENMASK(19, 16) +-#define LPC_HICR5_ID2IRQX_SHIFT 16 +-#define LPC_HICR5_SEL3IRQX BIT(15) +-#define LPC_HICR5_IRQXE3 BIT(14) +-#define LPC_HICR5_SEL2IRQX BIT(13) +-#define LPC_HICR5_IRQXE2 BIT(12) +-#define LPC_LADR3H 0x014 +-#define LPC_LADR3L 0x018 +-#define LPC_LADR12H 0x01C +-#define LPC_LADR12L 0x020 +-#define LPC_IDR1 0x024 +-#define LPC_IDR2 0x028 +-#define LPC_IDR3 0x02C +-#define LPC_ODR1 0x030 +-#define LPC_ODR2 0x034 +-#define LPC_ODR3 0x038 +-#define LPC_STR1 0x03C +-#define LPC_STR2 0x040 +-#define LPC_STR3 0x044 +-#define LPC_HICRB 0x100 +-#define LPC_HICRB_EN16LADR2 BIT(5) +-#define LPC_HICRB_EN16LADR1 BIT(4) +-#define LPC_HICRB_IBFIE4 BIT(1) +-#define LPC_HICRB_LPC4E BIT(0) +-#define LPC_HICRC 0x104 +-#define LPC_HICRC_ID4IRQX_MASK GENMASK(7, 4) +-#define LPC_HICRC_ID4IRQX_SHIFT 4 +-#define LPC_HICRC_TY4IRQX_MASK GENMASK(3, 2) +-#define LPC_HICRC_TY4IRQX_SHIFT 2 +-#define LPC_HICRC_OBF4_AUTO_CLR BIT(1) +-#define LPC_HICRC_IRQXE4 BIT(0) +-#define LPC_LADR4 0x110 +-#define LPC_IDR4 0x114 +-#define LPC_ODR4 0x118 +-#define LPC_STR4 0x11C +-#define LPC_LSADR12 0x120 +-#define LPC_LSADR12_LSADR2_MASK GENMASK(31, 16) +-#define LPC_LSADR12_LSADR2_SHIFT 16 +-#define LPC_LSADR12_LSADR1_MASK GENMASK(15, 0) +-#define LPC_LSADR12_LSADR1_SHIFT 0 +- +-#define OBE_POLL_PERIOD (HZ / 2) +- +-enum aspeed_kcs_irq_mode { +- aspeed_kcs_irq_none, +- aspeed_kcs_irq_serirq, +-}; ++#define SIRQCR0_IRQ12E1 BIT(1) ++#define SIRQCR0_IRQ1E1 BIT(0) ++#define HICR5 0x080 ++#define HICR5_ID3IRQX GENMASK(23, 20) ++#define HICR5_ID2IRQX GENMASK(19, 16) ++#define HICR5_SEL3IRQX BIT(15) 
++#define HICR5_IRQXE3 BIT(14) ++#define HICR5_SEL2IRQX BIT(13) ++#define HICR5_IRQXE2 BIT(12) ++#define HICRB 0x100 ++#define HICRB_EN16LADR2 BIT(5) ++#define HICRB_EN16LADR1 BIT(4) ++#define HICRB_IBFIE4 BIT(1) ++#define HICRB_LPC4E BIT(0) ++#define HICRC 0x104 ++#define HICRC_ID4IRQX GENMASK(7, 4) ++#define HICRC_SEL4IRQX BIT(2) ++#define HICRC_OBF4_AUTO_CLR BIT(1) ++#define HICRC_IRQXE4 BIT(0) ++#define LADR4 0x110 ++#define IDR4 0x114 ++#define ODR4 0x118 ++#define STR4 0x11C ++#define LSADR12 0x120 ++#define LSADR12_LSADR2 GENMASK(31, 16) ++#define LSADR12_LSADR1 GENMASK(15, 0) ++ ++#define KCS_HW_INSTANCE_NUM 4 ++#define KCS_OBE_POLL_PERIOD (HZ / 2) + + struct aspeed_kcs_bmc { + struct kcs_bmc_device kcs_bmc; +- + struct regmap *map; ++ int irq; ++ ++ u32 io_addr; ++ u32 hw_inst; + + struct { +- enum aspeed_kcs_irq_mode mode; +- int id; +- } upstream_irq; ++ u32 id; ++ u32 type; ++ } sirq; + + struct { + spinlock_t lock; +@@ -127,6 +114,13 @@ + } obe; + }; + ++static const struct kcs_ioreg aspeed_kcs_ioregs[KCS_HW_INSTANCE_NUM] = { ++ { .idr = IDR1, .odr = ODR1, .str = STR1 }, ++ { .idr = IDR2, .odr = ODR2, .str = STR2 }, ++ { .idr = IDR3, .odr = ODR3, .str = STR3 }, ++ { .idr = IDR4, .odr = ODR4, .str = STR4 }, ++}; ++ + static inline struct aspeed_kcs_bmc *to_aspeed_kcs_bmc(struct kcs_bmc_device *kcs_bmc) + { + return container_of(kcs_bmc, struct aspeed_kcs_bmc, kcs_bmc); +@@ -134,11 +128,11 @@ + + static u8 aspeed_kcs_inb(struct kcs_bmc_device *kcs_bmc, u32 reg) + { +- struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc); ++ struct aspeed_kcs_bmc *kcs_aspeed = to_aspeed_kcs_bmc(kcs_bmc); + u32 val = 0; + int rc; + +- rc = regmap_read(priv->map, reg, &val); ++ rc = regmap_read(kcs_aspeed->map, reg, &val); + WARN(rc != 0, "regmap_read() failed: %d\n", rc); + + return rc == 0 ? 
(u8) val : 0; +@@ -146,50 +140,50 @@ + + static void aspeed_kcs_outb(struct kcs_bmc_device *kcs_bmc, u32 reg, u8 data) + { +- struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc); ++ struct aspeed_kcs_bmc *kcs_aspeed = to_aspeed_kcs_bmc(kcs_bmc); + int rc; + +- rc = regmap_write(priv->map, reg, data); ++ rc = regmap_write(kcs_aspeed->map, reg, data); + WARN(rc != 0, "regmap_write() failed: %d\n", rc); + + /* Trigger the upstream IRQ on ODR writes, if enabled */ + + switch (reg) { +- case LPC_ODR1: +- case LPC_ODR2: +- case LPC_ODR3: +- case LPC_ODR4: ++ case ODR1: ++ case ODR2: ++ case ODR3: ++ case ODR4: + break; + default: + return; + } + +- if (priv->upstream_irq.mode != aspeed_kcs_irq_serirq) ++ if (kcs_aspeed->sirq.type == IRQ_TYPE_NONE) + return; + +- switch (kcs_bmc->channel) { +- case 1: +- switch (priv->upstream_irq.id) { ++ switch (kcs_aspeed->hw_inst) { ++ case 0: ++ switch (kcs_aspeed->sirq.id) { + case 12: +- regmap_update_bits(priv->map, LPC_SIRQCR0, LPC_SIRQCR0_IRQ12E1, +- LPC_SIRQCR0_IRQ12E1); ++ regmap_update_bits(kcs_aspeed->map, SIRQCR0, SIRQCR0_IRQ12E1, ++ SIRQCR0_IRQ12E1); + break; + case 1: +- regmap_update_bits(priv->map, LPC_SIRQCR0, LPC_SIRQCR0_IRQ1E1, +- LPC_SIRQCR0_IRQ1E1); ++ regmap_update_bits(kcs_aspeed->map, SIRQCR0, SIRQCR0_IRQ1E1, ++ SIRQCR0_IRQ1E1); + break; + default: + break; + } + break; ++ case 1: ++ regmap_update_bits(kcs_aspeed->map, HICR5, HICR5_IRQXE2, HICR5_IRQXE2); ++ break; + case 2: +- regmap_update_bits(priv->map, LPC_HICR5, LPC_HICR5_IRQXE2, LPC_HICR5_IRQXE2); ++ regmap_update_bits(kcs_aspeed->map, HICR5, HICR5_IRQXE3, HICR5_IRQXE3); + break; + case 3: +- regmap_update_bits(priv->map, LPC_HICR5, LPC_HICR5_IRQXE3, LPC_HICR5_IRQXE3); +- break; +- case 4: +- regmap_update_bits(priv->map, LPC_HICRC, LPC_HICRC_IRQXE4, LPC_HICRC_IRQXE4); ++ regmap_update_bits(kcs_aspeed->map, HICRC, HICRC_IRQXE4, HICRC_IRQXE4); + break; + default: + break; +@@ -198,86 +192,116 @@ + + static void aspeed_kcs_updateb(struct kcs_bmc_device *kcs_bmc, u32 reg, u8 mask, u8 val) + { +- struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc); ++ struct aspeed_kcs_bmc *kcs_aspeed = to_aspeed_kcs_bmc(kcs_bmc); + int rc; + +- rc = regmap_update_bits(priv->map, reg, mask, val); ++ rc = regmap_update_bits(kcs_aspeed->map, reg, mask, val); + WARN(rc != 0, "regmap_update_bits() failed: %d\n", rc); + } + ++static void aspeed_kcs_irq_mask_update(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 state) ++{ ++ struct aspeed_kcs_bmc *kcs_aspeed = to_aspeed_kcs_bmc(kcs_bmc); ++ int rc; ++ u8 str; ++ ++ /* We don't have an OBE IRQ, emulate it */ ++ if (mask & KCS_BMC_EVENT_TYPE_OBE) { ++ if (KCS_BMC_EVENT_TYPE_OBE & state) { ++ /* ++ * Given we don't have an OBE IRQ, delay by polling briefly to see if we can ++ * observe such an event before returning to the caller. This is not ++ * incorrect because OBF may have already become clear before enabling the ++ * IRQ if we had one, under which circumstance no event will be propagated ++ * anyway. ++ * ++ * The onus is on the client to perform a race-free check that it hasn't ++ * missed the event. ++ */ ++ rc = read_poll_timeout_atomic(aspeed_kcs_inb, str, ++ !(str & KCS_BMC_STR_OBF), 1, 100, false, ++ &kcs_aspeed->kcs_bmc, kcs_aspeed->kcs_bmc.ioreg.str); ++ /* Time for the slow path? 
*/ ++ if (rc == -ETIMEDOUT) ++ mod_timer(&kcs_aspeed->obe.timer, jiffies + KCS_OBE_POLL_PERIOD); ++ } else { ++ del_timer(&kcs_aspeed->obe.timer); ++ } ++ } ++ ++ if (mask & KCS_BMC_EVENT_TYPE_IBF) { ++ const bool enable = !!(state & KCS_BMC_EVENT_TYPE_IBF); ++ ++ switch (kcs_aspeed->hw_inst) { ++ case 0: ++ regmap_update_bits(kcs_aspeed->map, HICR2, HICR2_IBFIE1, ++ enable * HICR2_IBFIE1); ++ return; ++ case 1: ++ regmap_update_bits(kcs_aspeed->map, HICR2, HICR2_IBFIE2, ++ enable * HICR2_IBFIE2); ++ return; ++ case 2: ++ regmap_update_bits(kcs_aspeed->map, HICR2, HICR2_IBFIE3, ++ enable * HICR2_IBFIE3); ++ return; ++ case 3: ++ regmap_update_bits(kcs_aspeed->map, HICRB, HICRB_IBFIE4, ++ enable * HICRB_IBFIE4); ++ return; ++ default: ++ pr_warn("%s: Unsupported channel: %d", __func__, kcs_bmc->channel); ++ return; ++ } ++ } ++} ++ ++static const struct kcs_bmc_device_ops aspeed_kcs_ops = { ++ .io_inputb = aspeed_kcs_inb, ++ .io_outputb = aspeed_kcs_outb, ++ .io_updateb = aspeed_kcs_updateb, ++ .irq_mask_update = aspeed_kcs_irq_mask_update, ++}; ++ + /* +- * We note D for Data, and C for Cmd/Status, default rules are ++ * Follow IPMI v2.0, given a KCS IO base X, ++ * the Data and Cmd/Status IO addresses are X and X+1. + * +- * 1. Only the D address is given: +- * A. KCS1/KCS2 (D/C: X/X+4) +- * D/C: CA0h/CA4h +- * D/C: CA8h/CACh +- * B. KCS3 (D/C: XX2/XX3h) +- * D/C: CA2h/CA3h +- * C. KCS4 (D/C: X/X+1) +- * D/C: CA4h/CA5h +- * +- * 2. Both the D/C addresses are given: +- * A. KCS1/KCS2/KCS4 (D/C: X/Y) +- * D/C: CA0h/CA1h +- * D/C: CA8h/CA9h +- * D/C: CA4h/CA5h +- * B. KCS3 (D/C: XX2/XX3h) +- * D/C: CA2h/CA3h ++ * Note that the IO base of KCS channel 3/7/11/... must ends with 2 ++ * e.g. CA2h for KCS#3 + */ +-static int aspeed_kcs_set_address(struct kcs_bmc_device *kcs_bmc, u32 addrs[2], int nr_addrs) ++static int aspeed_kcs_config_io_address(struct aspeed_kcs_bmc *kcs_aspeed) + { +- struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc); ++ u32 io_addr; + +- if (WARN_ON(nr_addrs < 1 || nr_addrs > 2)) +- return -EINVAL; ++ io_addr = kcs_aspeed->io_addr; + +- switch (priv->kcs_bmc.channel) { ++ switch (kcs_aspeed->hw_inst) { ++ case 0: ++ regmap_update_bits(kcs_aspeed->map, HICR4, HICR4_LADR12AS, 0); ++ regmap_write(kcs_aspeed->map, LADR12H, io_addr >> 8); ++ regmap_write(kcs_aspeed->map, LADR12L, io_addr & 0xFF); ++ regmap_update_bits(kcs_aspeed->map, LSADR12, LSADR12_LSADR1, ++ FIELD_PREP(LSADR12_LSADR1, io_addr + 1)); ++ regmap_update_bits(kcs_aspeed->map, HICRB, HICRB_EN16LADR1, ++ HICRB_EN16LADR1); ++ break; + case 1: +- regmap_update_bits(priv->map, LPC_HICR4, LPC_HICR4_LADR12AS, 0); +- regmap_write(priv->map, LPC_LADR12H, addrs[0] >> 8); +- regmap_write(priv->map, LPC_LADR12L, addrs[0] & 0xFF); +- if (nr_addrs == 2) { +- regmap_update_bits(priv->map, LPC_LSADR12, LPC_LSADR12_LSADR1_MASK, +- addrs[1] << LPC_LSADR12_LSADR1_SHIFT); +- +- regmap_update_bits(priv->map, LPC_HICRB, LPC_HICRB_EN16LADR1, +- LPC_HICRB_EN16LADR1); +- } ++ regmap_update_bits(kcs_aspeed->map, HICR4, HICR4_LADR12AS, HICR4_LADR12AS); ++ regmap_write(kcs_aspeed->map, LADR12H, io_addr >> 8); ++ regmap_write(kcs_aspeed->map, LADR12L, io_addr & 0xFF); ++ regmap_update_bits(kcs_aspeed->map, LSADR12, LSADR12_LSADR2, ++ FIELD_PREP(LSADR12_LSADR2, io_addr + 1)); ++ regmap_update_bits(kcs_aspeed->map, HICRB, HICRB_EN16LADR2, ++ HICRB_EN16LADR2); + break; +- + case 2: +- regmap_update_bits(priv->map, LPC_HICR4, LPC_HICR4_LADR12AS, LPC_HICR4_LADR12AS); +- regmap_write(priv->map, LPC_LADR12H, addrs[0] >> 8); +- 
regmap_write(priv->map, LPC_LADR12L, addrs[0] & 0xFF); +- if (nr_addrs == 2) { +- regmap_update_bits(priv->map, LPC_LSADR12, LPC_LSADR12_LSADR2_MASK, +- addrs[1] << LPC_LSADR12_LSADR2_SHIFT); +- +- regmap_update_bits(priv->map, LPC_HICRB, LPC_HICRB_EN16LADR2, +- LPC_HICRB_EN16LADR2); +- } ++ regmap_write(kcs_aspeed->map, LADR3H, io_addr >> 8); ++ regmap_write(kcs_aspeed->map, LADR3L, io_addr & 0xFF); + break; +- + case 3: +- if (nr_addrs == 2) { +- dev_err(priv->kcs_bmc.dev, +- "Channel 3 only supports inferred status IO address\n"); +- return -EINVAL; +- } +- +- regmap_write(priv->map, LPC_LADR3H, addrs[0] >> 8); +- regmap_write(priv->map, LPC_LADR3L, addrs[0] & 0xFF); ++ regmap_write(kcs_aspeed->map, LADR4, ((io_addr + 1) << 16) | io_addr); + break; +- +- case 4: +- if (nr_addrs == 1) +- regmap_write(priv->map, LPC_LADR4, ((addrs[0] + 1) << 16) | addrs[0]); +- else +- regmap_write(priv->map, LPC_LADR4, (addrs[1] << 16) | addrs[0]); +- +- break; +- + default: + return -EINVAL; + } +@@ -285,398 +309,283 @@ + return 0; + } + +-static inline int aspeed_kcs_map_serirq_type(u32 dt_type) +-{ +- switch (dt_type) { +- case IRQ_TYPE_EDGE_RISING: +- return LPC_TYIRQX_RISING; +- case IRQ_TYPE_LEVEL_HIGH: +- return LPC_TYIRQX_HIGH; +- case IRQ_TYPE_LEVEL_LOW: +- return LPC_TYIRQX_LOW; +- default: +- return -EINVAL; +- } +-} +- +-static int aspeed_kcs_config_upstream_irq(struct aspeed_kcs_bmc *priv, u32 id, u32 dt_type) ++static int aspeed_kcs_config_upstream_serirq(struct aspeed_kcs_bmc *kcs_aspeed) + { +- unsigned int mask, val, hw_type; +- int ret; ++ unsigned int mask, val; ++ u32 sirq_id, sirq_type; + +- if (id > 15) +- return -EINVAL; +- +- ret = aspeed_kcs_map_serirq_type(dt_type); +- if (ret < 0) +- return ret; +- hw_type = ret; ++ if (kcs_aspeed->sirq.type == IRQ_TYPE_NONE) ++ return 0; + +- priv->upstream_irq.mode = aspeed_kcs_irq_serirq; +- priv->upstream_irq.id = id; ++ sirq_id = kcs_aspeed->sirq.id; ++ sirq_type = kcs_aspeed->sirq.type; + +- switch (priv->kcs_bmc.channel) { +- case 1: ++ switch (kcs_aspeed->hw_inst) { ++ case 0: + /* Needs IRQxE1 rather than (ID1IRQX, SEL1IRQX, IRQXE1) before AST2600 A3 */ + break; ++ case 1: ++ mask = HICR5_SEL2IRQX | HICR5_ID2IRQX; ++ val = FIELD_PREP(HICR5_ID2IRQX, sirq_id); ++ val |= (sirq_type == IRQ_TYPE_LEVEL_HIGH) ? HICR5_SEL2IRQX : 0; ++ regmap_update_bits(kcs_aspeed->map, HICR5, mask, val); ++ break; + case 2: +- if (!(hw_type == LPC_TYIRQX_LOW || hw_type == LPC_TYIRQX_HIGH)) +- return -EINVAL; +- +- mask = LPC_HICR5_SEL2IRQX | LPC_HICR5_ID2IRQX_MASK; +- val = (id << LPC_HICR5_ID2IRQX_SHIFT); +- val |= (hw_type == LPC_TYIRQX_HIGH) ? LPC_HICR5_SEL2IRQX : 0; +- regmap_update_bits(priv->map, LPC_HICR5, mask, val); +- ++ mask = HICR5_SEL3IRQX | HICR5_ID3IRQX; ++ val = FIELD_PREP(HICR5_ID3IRQX, sirq_id); ++ val |= (sirq_type == IRQ_TYPE_LEVEL_HIGH) ? HICR5_SEL3IRQX : 0; ++ regmap_update_bits(kcs_aspeed->map, HICR5, mask, val); + break; + case 3: +- if (!(hw_type == LPC_TYIRQX_LOW || hw_type == LPC_TYIRQX_HIGH)) +- return -EINVAL; +- +- mask = LPC_HICR5_SEL3IRQX | LPC_HICR5_ID3IRQX_MASK; +- val = (id << LPC_HICR5_ID3IRQX_SHIFT); +- val |= (hw_type == LPC_TYIRQX_HIGH) ? 
LPC_HICR5_SEL3IRQX : 0; +- regmap_update_bits(priv->map, LPC_HICR5, mask, val); +- +- break; +- case 4: +- mask = LPC_HICRC_ID4IRQX_MASK | LPC_HICRC_TY4IRQX_MASK | LPC_HICRC_OBF4_AUTO_CLR; +- val = (id << LPC_HICRC_ID4IRQX_SHIFT) | (hw_type << LPC_HICRC_TY4IRQX_SHIFT); +- regmap_update_bits(priv->map, LPC_HICRC, mask, val); ++ mask = HICRC_ID4IRQX | HICRC_SEL4IRQX | HICRC_OBF4_AUTO_CLR; ++ val = FIELD_PREP(HICRC_ID4IRQX, sirq_id); ++ val |= (sirq_type == IRQ_TYPE_LEVEL_HIGH) ? HICRC_SEL4IRQX : 0; ++ regmap_update_bits(kcs_aspeed->map, HICRC, mask, val); + break; + default: +- dev_warn(priv->kcs_bmc.dev, ++ dev_warn(kcs_aspeed->kcs_bmc.dev, + "SerIRQ configuration not supported on KCS channel %d\n", +- priv->kcs_bmc.channel); ++ kcs_aspeed->kcs_bmc.channel); + return -EINVAL; + } + + return 0; + } + +-static void aspeed_kcs_enable_channel(struct kcs_bmc_device *kcs_bmc, bool enable) ++static int aspeed_kcs_enable_channel(struct aspeed_kcs_bmc *kcs_aspeed, bool enable) + { +- struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc); +- +- switch (kcs_bmc->channel) { ++ switch (kcs_aspeed->hw_inst) { ++ case 0: ++ regmap_update_bits(kcs_aspeed->map, HICR0, HICR0_LPC1E, enable * HICR0_LPC1E); ++ break; + case 1: +- regmap_update_bits(priv->map, LPC_HICR0, LPC_HICR0_LPC1E, enable * LPC_HICR0_LPC1E); +- return; ++ regmap_update_bits(kcs_aspeed->map, HICR0, HICR0_LPC2E, enable * HICR0_LPC2E); ++ break; + case 2: +- regmap_update_bits(priv->map, LPC_HICR0, LPC_HICR0_LPC2E, enable * LPC_HICR0_LPC2E); +- return; ++ regmap_update_bits(kcs_aspeed->map, HICR0, HICR0_LPC3E, enable * HICR0_LPC3E); ++ regmap_update_bits(kcs_aspeed->map, HICR4, HICR4_KCSENBL, enable * HICR4_KCSENBL); ++ break; + case 3: +- regmap_update_bits(priv->map, LPC_HICR0, LPC_HICR0_LPC3E, enable * LPC_HICR0_LPC3E); +- regmap_update_bits(priv->map, LPC_HICR4, +- LPC_HICR4_KCSENBL, enable * LPC_HICR4_KCSENBL); +- return; +- case 4: +- regmap_update_bits(priv->map, LPC_HICRB, LPC_HICRB_LPC4E, enable * LPC_HICRB_LPC4E); +- return; ++ regmap_update_bits(kcs_aspeed->map, HICRB, HICRB_LPC4E, enable * HICRB_LPC4E); ++ break; + default: +- pr_warn("%s: Unsupported channel: %d", __func__, kcs_bmc->channel); +- return; ++ return -EINVAL; + } ++ ++ return 0; + } + + static void aspeed_kcs_check_obe(struct timer_list *timer) + { +- struct aspeed_kcs_bmc *priv = container_of(timer, struct aspeed_kcs_bmc, obe.timer); ++ struct aspeed_kcs_bmc *kcs_aspeed = container_of(timer, struct aspeed_kcs_bmc, obe.timer); + unsigned long flags; + u8 str; + +- spin_lock_irqsave(&priv->obe.lock, flags); +- if (priv->obe.remove) { +- spin_unlock_irqrestore(&priv->obe.lock, flags); ++ spin_lock_irqsave(&kcs_aspeed->obe.lock, flags); ++ if (kcs_aspeed->obe.remove) { ++ spin_unlock_irqrestore(&kcs_aspeed->obe.lock, flags); + return; + } + +- str = aspeed_kcs_inb(&priv->kcs_bmc, priv->kcs_bmc.ioreg.str); ++ str = aspeed_kcs_inb(&kcs_aspeed->kcs_bmc, kcs_aspeed->kcs_bmc.ioreg.str); + if (str & KCS_BMC_STR_OBF) { +- mod_timer(timer, jiffies + OBE_POLL_PERIOD); +- spin_unlock_irqrestore(&priv->obe.lock, flags); ++ mod_timer(timer, jiffies + KCS_OBE_POLL_PERIOD); ++ spin_unlock_irqrestore(&kcs_aspeed->obe.lock, flags); + return; + } +- spin_unlock_irqrestore(&priv->obe.lock, flags); +- +- kcs_bmc_handle_event(&priv->kcs_bmc); +-} +- +-static void aspeed_kcs_irq_mask_update(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 state) +-{ +- struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc); +- int rc; +- u8 str; +- +- /* We don't have an OBE IRQ, emulate it */ +- if (mask & 
KCS_BMC_EVENT_TYPE_OBE) { +- if (KCS_BMC_EVENT_TYPE_OBE & state) { +- /* +- * Given we don't have an OBE IRQ, delay by polling briefly to see if we can +- * observe such an event before returning to the caller. This is not +- * incorrect because OBF may have already become clear before enabling the +- * IRQ if we had one, under which circumstance no event will be propagated +- * anyway. +- * +- * The onus is on the client to perform a race-free check that it hasn't +- * missed the event. +- */ +- rc = read_poll_timeout_atomic(aspeed_kcs_inb, str, +- !(str & KCS_BMC_STR_OBF), 1, 100, false, +- &priv->kcs_bmc, priv->kcs_bmc.ioreg.str); +- /* Time for the slow path? */ +- if (rc == -ETIMEDOUT) +- mod_timer(&priv->obe.timer, jiffies + OBE_POLL_PERIOD); +- } else { +- del_timer(&priv->obe.timer); +- } +- } ++ spin_unlock_irqrestore(&kcs_aspeed->obe.lock, flags); + +- if (mask & KCS_BMC_EVENT_TYPE_IBF) { +- const bool enable = !!(state & KCS_BMC_EVENT_TYPE_IBF); +- +- switch (kcs_bmc->channel) { +- case 1: +- regmap_update_bits(priv->map, LPC_HICR2, LPC_HICR2_IBFIE1, +- enable * LPC_HICR2_IBFIE1); +- return; +- case 2: +- regmap_update_bits(priv->map, LPC_HICR2, LPC_HICR2_IBFIE2, +- enable * LPC_HICR2_IBFIE2); +- return; +- case 3: +- regmap_update_bits(priv->map, LPC_HICR2, LPC_HICR2_IBFIE3, +- enable * LPC_HICR2_IBFIE3); +- return; +- case 4: +- regmap_update_bits(priv->map, LPC_HICRB, LPC_HICRB_IBFIE4, +- enable * LPC_HICRB_IBFIE4); +- return; +- default: +- pr_warn("%s: Unsupported channel: %d", __func__, kcs_bmc->channel); +- return; +- } +- } ++ kcs_bmc_handle_event(&kcs_aspeed->kcs_bmc); + } + +-static const struct kcs_bmc_device_ops aspeed_kcs_ops = { +- .irq_mask_update = aspeed_kcs_irq_mask_update, +- .io_inputb = aspeed_kcs_inb, +- .io_outputb = aspeed_kcs_outb, +- .io_updateb = aspeed_kcs_updateb, +-}; +- +-static irqreturn_t aspeed_kcs_irq(int irq, void *arg) ++static irqreturn_t aspeed_kcs_isr(int irq, void *arg) + { + struct kcs_bmc_device *kcs_bmc = arg; + + return kcs_bmc_handle_event(kcs_bmc); + } + +-static int aspeed_kcs_config_downstream_irq(struct kcs_bmc_device *kcs_bmc, +- struct platform_device *pdev) ++static int aspeed_kcs_probe(struct platform_device *pdev) + { +- struct device *dev = &pdev->dev; +- int irq; ++ struct aspeed_kcs_bmc *kcs_aspeed; ++ struct kcs_bmc_device *kcs_bmc; ++ struct device *dev; ++ const __be32 *reg; ++ int i, rc, chan; + +- irq = platform_get_irq(pdev, 0); +- if (irq < 0) +- return irq; +- +- return devm_request_irq(dev, irq, aspeed_kcs_irq, IRQF_SHARED, +- dev_name(dev), kcs_bmc); +-} ++ dev = &pdev->dev; + +-static const struct kcs_ioreg ast_kcs_bmc_ioregs[KCS_CHANNEL_MAX] = { +- { .idr = LPC_IDR1, .odr = LPC_ODR1, .str = LPC_STR1 }, +- { .idr = LPC_IDR2, .odr = LPC_ODR2, .str = LPC_STR2 }, +- { .idr = LPC_IDR3, .odr = LPC_ODR3, .str = LPC_STR3 }, +- { .idr = LPC_IDR4, .odr = LPC_ODR4, .str = LPC_STR4 }, +-}; ++ kcs_aspeed = devm_kzalloc(dev, sizeof(*kcs_aspeed), GFP_KERNEL); ++ if (!kcs_aspeed) ++ return -ENOMEM; + +-static int aspeed_kcs_of_get_channel(struct platform_device *pdev) +-{ +- struct device_node *np; +- struct kcs_ioreg ioreg; +- const __be32 *reg; +- int i; ++ kcs_bmc = &kcs_aspeed->kcs_bmc; ++ kcs_bmc->ops = &aspeed_kcs_ops; ++ kcs_bmc->dev = dev; + +- np = pdev->dev.of_node; ++ kcs_aspeed->map = syscon_node_to_regmap(pdev->dev.parent->of_node); ++ if (IS_ERR(kcs_aspeed->map)) { ++ dev_err(&pdev->dev, "cannot get regmap\n"); ++ return -ENODEV; ++ } + +- /* Don't translate addresses, we want offsets for the regmaps */ +- reg = 
of_get_address(np, 0, NULL, NULL); +- if (!reg) +- return -EINVAL; +- ioreg.idr = be32_to_cpup(reg); ++ kcs_aspeed->irq = platform_get_irq(pdev, 0); ++ if (kcs_aspeed->irq < 0) { ++ dev_err(dev, "cannot get IRQ number\n"); ++ return kcs_aspeed->irq; ++ } + +- reg = of_get_address(np, 1, NULL, NULL); +- if (!reg) +- return -EINVAL; +- ioreg.odr = be32_to_cpup(reg); ++ reg = of_get_address(dev->of_node, 0, NULL, NULL); ++ if (!reg) { ++ dev_err(dev, "cannot get IDR\n"); ++ return -ENODEV; ++ } + +- reg = of_get_address(np, 2, NULL, NULL); +- if (!reg) +- return -EINVAL; +- ioreg.str = be32_to_cpup(reg); ++ kcs_bmc->ioreg.idr = be32_to_cpup(reg); + +- for (i = 0; i < ARRAY_SIZE(ast_kcs_bmc_ioregs); i++) { +- if (!memcmp(&ast_kcs_bmc_ioregs[i], &ioreg, sizeof(ioreg))) +- return i + 1; ++ reg = of_get_address(dev->of_node, 1, NULL, NULL); ++ if (!reg) { ++ dev_err(dev, "cannot get ODR\n"); ++ return -ENODEV; + } +- return -EINVAL; +-} + +-static int +-aspeed_kcs_of_get_io_address(struct platform_device *pdev, u32 addrs[2]) +-{ +- int rc; ++ kcs_bmc->ioreg.odr = be32_to_cpup(reg); + +- rc = of_property_read_variable_u32_array(pdev->dev.of_node, +- "aspeed,lpc-io-reg", +- addrs, 1, 2); +- if (rc < 0) { +- dev_err(&pdev->dev, "No valid 'aspeed,lpc-io-reg' configured\n"); +- return rc; ++ reg = of_get_address(dev->of_node, 2, NULL, NULL); ++ if (!reg) { ++ dev_err(dev, "cannot get STR\n"); ++ return -ENODEV; + } + +- if (addrs[0] > 0xffff) { +- dev_err(&pdev->dev, "Invalid data address in 'aspeed,lpc-io-reg'\n"); +- return -EINVAL; ++ kcs_bmc->ioreg.str = be32_to_cpup(reg); ++ ++ for (i = 0; i < KCS_HW_INSTANCE_NUM; ++i) { ++ if (aspeed_kcs_ioregs[i].idr == kcs_bmc->ioreg.idr && ++ aspeed_kcs_ioregs[i].odr == kcs_bmc->ioreg.odr && ++ aspeed_kcs_ioregs[i].str == kcs_bmc->ioreg.str) { ++ kcs_aspeed->hw_inst = i; ++ break; ++ } + } + +- if (rc == 2 && addrs[1] > 0xffff) { +- dev_err(&pdev->dev, "Invalid status address in 'aspeed,lpc-io-reg'\n"); ++ if (i >= KCS_HW_INSTANCE_NUM) { ++ dev_err(dev, "invalid IDR/ODR/STR register\n"); + return -EINVAL; + } + +- return rc; +-} +- +-static int aspeed_kcs_probe(struct platform_device *pdev) +-{ +- struct kcs_bmc_device *kcs_bmc; +- struct aspeed_kcs_bmc *priv; +- struct device_node *np; +- bool have_upstream_irq; +- u32 upstream_irq[2]; +- int rc, channel; +- int nr_addrs; +- u32 addrs[2]; +- +- np = pdev->dev.of_node->parent; +- if (!of_device_is_compatible(np, "aspeed,ast2400-lpc-v2") && +- !of_device_is_compatible(np, "aspeed,ast2500-lpc-v2") && +- !of_device_is_compatible(np, "aspeed,ast2600-lpc-v2")) { +- dev_err(&pdev->dev, "unsupported LPC device binding\n"); ++ rc = of_property_read_u32(dev->of_node, "kcs-io-addr", &kcs_aspeed->io_addr); ++ if (rc || kcs_aspeed->io_addr > (USHRT_MAX - 1)) { ++ dev_err(dev, "invalid IO address\n"); + return -ENODEV; + } + +- channel = aspeed_kcs_of_get_channel(pdev); +- if (channel < 0) +- return channel; +- +- nr_addrs = aspeed_kcs_of_get_io_address(pdev, addrs); +- if (nr_addrs < 0) +- return nr_addrs; +- +- np = pdev->dev.of_node; +- rc = of_property_read_u32_array(np, "aspeed,lpc-interrupts", upstream_irq, 2); +- if (rc && rc != -EINVAL) +- return -EINVAL; +- +- have_upstream_irq = !rc; ++ rc = of_property_read_u32(dev->of_node, "kcs-channel", &chan); ++ if (rc) { ++ chan = ida_alloc(&aspeed_kcs_bmc_ida, GFP_KERNEL); ++ if (chan < 0) { ++ dev_err(dev, "cannot allocate ID for KCS channel\n"); ++ return chan; ++ } ++ } + +- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); +- if (!priv) +- return -ENOMEM; 
++ kcs_bmc->channel = chan; + +- kcs_bmc = &priv->kcs_bmc; +- kcs_bmc->dev = &pdev->dev; +- kcs_bmc->channel = channel; +- kcs_bmc->ioreg = ast_kcs_bmc_ioregs[channel - 1]; +- kcs_bmc->ops = &aspeed_kcs_ops; ++ rc = of_property_read_u32_array(dev->of_node, "kcs-upstream-serirq", (u32 *)&kcs_aspeed->sirq, 2); ++ if (rc) { ++ kcs_aspeed->sirq.type = IRQ_TYPE_NONE; ++ } else { ++ if (kcs_aspeed->sirq.id > 15) { ++ dev_err(dev, "invalid SerIRQ number, expected sirq <= 15\n"); ++ return -EINVAL; ++ } + +- priv->map = syscon_node_to_regmap(pdev->dev.parent->of_node); +- if (IS_ERR(priv->map)) { +- dev_err(&pdev->dev, "Couldn't get regmap\n"); +- return -ENODEV; ++ if (kcs_aspeed->sirq.type != IRQ_TYPE_LEVEL_HIGH && ++ kcs_aspeed->sirq.type != IRQ_TYPE_LEVEL_LOW) { ++ dev_err(dev, "invalid SerIRQ type, expected IRQ_TYPE_LEVEL_HIGH/LOW only\n"); ++ return -EINVAL; ++ } + } + +- spin_lock_init(&priv->obe.lock); +- priv->obe.remove = false; +- timer_setup(&priv->obe.timer, aspeed_kcs_check_obe, 0); ++ timer_setup(&kcs_aspeed->obe.timer, aspeed_kcs_check_obe, 0); ++ spin_lock_init(&kcs_aspeed->obe.lock); ++ kcs_aspeed->obe.remove = false; + +- rc = aspeed_kcs_set_address(kcs_bmc, addrs, nr_addrs); ++ aspeed_kcs_irq_mask_update(kcs_bmc, (KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE), 0); ++ ++ rc = aspeed_kcs_config_io_address(kcs_aspeed); + if (rc) + return rc; + +- /* Host to BMC IRQ */ +- rc = aspeed_kcs_config_downstream_irq(kcs_bmc, pdev); ++ rc = aspeed_kcs_config_upstream_serirq(kcs_aspeed); + if (rc) + return rc; + +- /* BMC to Host IRQ */ +- if (have_upstream_irq) { +- rc = aspeed_kcs_config_upstream_irq(priv, upstream_irq[0], upstream_irq[1]); +- if (rc < 0) +- return rc; +- } else { +- priv->upstream_irq.mode = aspeed_kcs_irq_none; ++ rc = devm_request_irq(dev, kcs_aspeed->irq, aspeed_kcs_isr, IRQF_SHARED, ++ dev_name(dev), kcs_bmc); ++ if (rc) { ++ dev_err(dev, "cannot request IRQ\n"); ++ return rc; + } + +- platform_set_drvdata(pdev, priv); ++ platform_set_drvdata(pdev, kcs_aspeed); + +- aspeed_kcs_irq_mask_update(kcs_bmc, (KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE), 0); +- aspeed_kcs_enable_channel(kcs_bmc, true); ++ rc = aspeed_kcs_enable_channel(kcs_aspeed, true); ++ if (rc) { ++ dev_err(dev, "cannot enable channel %d: %d\n", ++ kcs_bmc->channel, rc); ++ return rc; ++ } + +- rc = kcs_bmc_add_device(&priv->kcs_bmc); ++ rc = kcs_bmc_add_device(kcs_bmc); + if (rc) { +- dev_warn(&pdev->dev, "Failed to register channel %d: %d\n", kcs_bmc->channel, rc); ++ dev_warn(dev, "cannot register channel %d: %d\n", ++ kcs_bmc->channel, rc); + return rc; + } + +- dev_info(&pdev->dev, "Initialised channel %d at 0x%x\n", +- kcs_bmc->channel, addrs[0]); ++ dev_info(dev, "Initialised channel %d at IO address 0x%x\n", ++ kcs_bmc->channel, kcs_aspeed->io_addr); + + return 0; + } + + static void aspeed_kcs_remove(struct platform_device *pdev) + { +- struct aspeed_kcs_bmc *priv = platform_get_drvdata(pdev); +- struct kcs_bmc_device *kcs_bmc = &priv->kcs_bmc; ++ struct aspeed_kcs_bmc *kcs_aspeed = platform_get_drvdata(pdev); ++ struct kcs_bmc_device *kcs_bmc = &kcs_aspeed->kcs_bmc; + + kcs_bmc_remove_device(kcs_bmc); + +- aspeed_kcs_enable_channel(kcs_bmc, false); ++ aspeed_kcs_enable_channel(kcs_aspeed, false); + aspeed_kcs_irq_mask_update(kcs_bmc, (KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE), 0); + + /* Make sure it's proper dead */ +- spin_lock_irq(&priv->obe.lock); +- priv->obe.remove = true; +- spin_unlock_irq(&priv->obe.lock); +- del_timer_sync(&priv->obe.timer); ++ 
spin_lock_irq(&kcs_aspeed->obe.lock); ++ kcs_aspeed->obe.remove = true; ++ spin_unlock_irq(&kcs_aspeed->obe.lock); ++ del_timer_sync(&kcs_aspeed->obe.timer); + } + +-static const struct of_device_id ast_kcs_bmc_match[] = { +- { .compatible = "aspeed,ast2400-kcs-bmc-v2" }, +- { .compatible = "aspeed,ast2500-kcs-bmc-v2" }, ++static const struct of_device_id aspeed_kcs_bmc_match[] = { ++ { .compatible = "aspeed,ast2400-kcs-bmc" }, ++ { .compatible = "aspeed,ast2500-kcs-bmc" }, + { .compatible = "aspeed,ast2600-kcs-bmc" }, + { } + }; +-MODULE_DEVICE_TABLE(of, ast_kcs_bmc_match); ++MODULE_DEVICE_TABLE(of, aspeed_kcs_bmc_match); + +-static struct platform_driver ast_kcs_bmc_driver = { ++static struct platform_driver aspeed_kcs_bmc_driver = { + .driver = { +- .name = DEVICE_NAME, +- .of_match_table = ast_kcs_bmc_match, ++ .name = DEVICE_NAME, ++ .of_match_table = aspeed_kcs_bmc_match, + }, +- .probe = aspeed_kcs_probe, +- .remove_new = aspeed_kcs_remove, ++ .probe = aspeed_kcs_probe, ++ .remove = aspeed_kcs_remove, + }; +-module_platform_driver(ast_kcs_bmc_driver); ++module_platform_driver(aspeed_kcs_bmc_driver); + + MODULE_LICENSE("GPL v2"); + MODULE_AUTHOR("Haiyue Wang "); + MODULE_AUTHOR("Andrew Jeffery "); ++MODULE_AUTHOR("Chia-Wei Wang "); + MODULE_DESCRIPTION("Aspeed device interface to the KCS BMC device"); +diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig +--- a/drivers/clk/Kconfig 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/clk/Kconfig 2025-12-23 10:16:07.639258736 +0000 +@@ -277,6 +277,20 @@ + The G4 and G5 series, including the ast2400 and ast2500, are supported + by this driver. + ++config COMMON_CLK_AST2700 ++ bool "Clock driver for AST2700 SoC" ++ depends on ARCH_ASPEED || COMPILE_TEST ++ help ++ This driver provides support for clock on AST2700 SoC. ++ The driver is responsible for managing the various clocks required ++ by the peripherals and cores within the AST2700. ++ ++config COMMON_CLK_AST1700 ++ bool "Clock driver for AST1700" ++ depends on ARCH_ASPEED || COMPILE_TEST ++ help ++ This driver supports the AST1700 clocks on the Aspeed BMC platforms. ++ + config COMMON_CLK_S2MPS11 + tristate "Clock driver for S2MPS1X/S5M8767 MFD" + depends on MFD_SEC_CORE || COMPILE_TEST +@@ -342,6 +356,14 @@ + This driver supports the clocking features of the Cirrus Logic + Lochnagar audio development board. + ++config COMMON_CLK_NPCM8XX ++ tristate "Clock driver for the NPCM8XX SoC Family" ++ depends on ARCH_NPCM || COMPILE_TEST ++ help ++ This driver supports the clocks on the Nuvoton BMC NPCM8XX SoC Family, ++ all the clocks are initialized by the bootloader, so this driver ++ allows only reading of current settings directly from the hardware. 
++ + config COMMON_CLK_LOONGSON2 + bool "Clock driver for Loongson-2 SoC" + depends on LOONGARCH || COMPILE_TEST +diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile +--- a/drivers/clk/Makefile 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/clk/Makefile 2025-12-23 10:16:11.218198718 +0000 +@@ -48,6 +48,8 @@ + obj-$(CONFIG_COMMON_CLK_GEMINI) += clk-gemini.o + obj-$(CONFIG_COMMON_CLK_ASPEED) += clk-aspeed.o + obj-$(CONFIG_MACH_ASPEED_G6) += clk-ast2600.o ++obj-$(CONFIG_COMMON_CLK_AST2700) += clk-ast2700.o ++obj-$(CONFIG_COMMON_CLK_AST1700) += clk-ast1700.o + obj-$(CONFIG_ARCH_HIGHBANK) += clk-highbank.o + obj-$(CONFIG_CLK_HSDK) += clk-hsdk-pll.o + obj-$(CONFIG_COMMON_CLK_K210) += clk-k210.o +@@ -62,6 +64,7 @@ + obj-$(CONFIG_ARCH_MOXART) += clk-moxart.o + obj-$(CONFIG_ARCH_NOMADIK) += clk-nomadik.o + obj-$(CONFIG_ARCH_NPCM7XX) += clk-npcm7xx.o ++obj-$(CONFIG_COMMON_CLK_NPCM8XX) += clk-npcm8xx.o + obj-$(CONFIG_ARCH_NSPIRE) += clk-nspire.o + obj-$(CONFIG_COMMON_CLK_PALMAS) += clk-palmas.o + obj-$(CONFIG_CLK_LS1028A_PLLDIG) += clk-plldig.o +diff --git a/drivers/clk/clk-aspeed.c b/drivers/clk/clk-aspeed.c +--- a/drivers/clk/clk-aspeed.c 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/clk/clk-aspeed.c 2025-12-23 10:16:20.650040614 +0000 +@@ -54,15 +54,15 @@ + [ASPEED_CLK_GATE_DCLK] = { 5, -1, "dclk-gate", NULL, CLK_IS_CRITICAL }, /* DAC */ + [ASPEED_CLK_GATE_REFCLK] = { 6, -1, "refclk-gate", "clkin", CLK_IS_CRITICAL }, + [ASPEED_CLK_GATE_USBPORT2CLK] = { 7, 3, "usb-port2-gate", NULL, 0 }, /* USB2.0 Host port 2 */ +- [ASPEED_CLK_GATE_LCLK] = { 8, 5, "lclk-gate", NULL, 0 }, /* LPC */ ++ [ASPEED_CLK_GATE_LCLK] = { 8, 5, "lclk-gate", NULL, CLK_IS_CRITICAL }, /* LPC */ + [ASPEED_CLK_GATE_USBUHCICLK] = { 9, 15, "usb-uhci-gate", NULL, 0 }, /* USB1.1 (requires port 2 enabled) */ + [ASPEED_CLK_GATE_D1CLK] = { 10, 13, "d1clk-gate", NULL, 0 }, /* GFX CRT */ + [ASPEED_CLK_GATE_YCLK] = { 13, 4, "yclk-gate", NULL, 0 }, /* HAC */ + [ASPEED_CLK_GATE_USBPORT1CLK] = { 14, 14, "usb-port1-gate", NULL, 0 }, /* USB2 hub/USB2 host port 1/USB1.1 dev */ +- [ASPEED_CLK_GATE_UART1CLK] = { 15, -1, "uart1clk-gate", "uart", 0 }, /* UART1 */ +- [ASPEED_CLK_GATE_UART2CLK] = { 16, -1, "uart2clk-gate", "uart", 0 }, /* UART2 */ ++ [ASPEED_CLK_GATE_UART1CLK] = { 15, -1, "uart1clk-gate", "uart", CLK_IS_CRITICAL }, /* UART1 */ ++ [ASPEED_CLK_GATE_UART2CLK] = { 16, -1, "uart2clk-gate", "uart", CLK_IS_CRITICAL }, /* UART2 */ + [ASPEED_CLK_GATE_UART5CLK] = { 17, -1, "uart5clk-gate", "uart", 0 }, /* UART5 */ +- [ASPEED_CLK_GATE_ESPICLK] = { 19, -1, "espiclk-gate", NULL, 0 }, /* eSPI */ ++ [ASPEED_CLK_GATE_ESPICLK] = { 19, -1, "espiclk-gate", NULL, CLK_IS_CRITICAL }, /* eSPI */ + [ASPEED_CLK_GATE_MAC1CLK] = { 20, 11, "mac1clk-gate", "mac", 0 }, /* MAC1 */ + [ASPEED_CLK_GATE_MAC2CLK] = { 21, 12, "mac2clk-gate", "mac", 0 }, /* MAC2 */ + [ASPEED_CLK_GATE_RSACLK] = { 24, -1, "rsaclk-gate", NULL, 0 }, /* RSA */ +@@ -278,6 +278,7 @@ + [ASPEED_RESET_PECI] = 10, + [ASPEED_RESET_I2C] = 2, + [ASPEED_RESET_AHB] = 1, ++ [ASPEED_RESET_VIDEO] = 6, + + /* + * SCUD4 resets start at an offset to separate them from +diff --git a/drivers/clk/clk-ast1700.c b/drivers/clk/clk-ast1700.c +--- a/drivers/clk/clk-ast1700.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/clk/clk-ast1700.c 2025-12-23 10:16:20.655040530 +0000 +@@ -0,0 +1,807 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++// Copyright ASPEED Technology ++ ++#include ++#include ++#include ++#include ++ ++#include ++#include ++ ++#define AST1700_CLK_25MHZ 25000000 ++#define 
AST1700_CLK_24MHZ 24000000 ++#define AST1700_CLK_192MHZ 192000000 ++/* IO Die */ ++#define AST1700_CLK_STOP 0x240 ++#define AST1700_CLK_STOP2 0x260 ++#define AST1700_CLK_SEL1 0x280 ++#define AST1700_CLK_SEL2 0x284 ++#define UXCLK_MASK GENMASK(1, 0) ++#define HUXCLK_MASK GENMASK(4, 3) ++#define AST1700_HPLL_PARAM 0x300 ++#define AST1700_APLL_PARAM 0x310 ++#define AST1700_DPLL_PARAM 0x320 ++#define AST1700_UXCLK_CTRL 0x330 ++#define AST1700_HUXCLK_CTRL 0x334 ++ ++static DEFINE_IDA(ast1700_clk_ida); ++ ++/* Globally visible clocks */ ++static DEFINE_SPINLOCK(ast1700_clk_lock); ++ ++/* Division of RGMII Clock */ ++static const struct clk_div_table ast1700_rgmii_div_table[] = { ++ { 0x0, 4 }, ++ { 0x1, 4 }, ++ { 0x2, 6 }, ++ { 0x3, 8 }, ++ { 0x4, 10 }, ++ { 0x5, 12 }, ++ { 0x6, 14 }, ++ { 0x7, 16 }, ++ { 0 } ++}; ++ ++/* Division of RMII Clock */ ++static const struct clk_div_table ast1700_rmii_div_table[] = { ++ { 0x0, 8 }, ++ { 0x1, 8 }, ++ { 0x2, 12 }, ++ { 0x3, 16 }, ++ { 0x4, 20 }, ++ { 0x5, 24 }, ++ { 0x6, 28 }, ++ { 0x7, 32 }, ++ { 0 } ++}; ++ ++/* Division of HCLK/SDIO/MAC/apll_divn CLK */ ++static const struct clk_div_table ast1700_clk_div_table[] = { ++ { 0x0, 2 }, ++ { 0x1, 2 }, ++ { 0x2, 3 }, ++ { 0x3, 4 }, ++ { 0x4, 5 }, ++ { 0x5, 6 }, ++ { 0x6, 7 }, ++ { 0x7, 8 }, ++ { 0 } ++}; ++ ++/* Division of PCLK/EMMC CLK */ ++static const struct clk_div_table ast1700_clk_div_table2[] = { ++ { 0x0, 2 }, ++ { 0x1, 4 }, ++ { 0x2, 6 }, ++ { 0x3, 8 }, ++ { 0x4, 10 }, ++ { 0x5, 12 }, ++ { 0x6, 14 }, ++ { 0x7, 16 }, ++ { 0 } ++}; ++ ++static struct clk_hw *AST1700_calc_uclk(const char *name, u32 val) ++{ ++ unsigned int mult, div; ++ ++ /* UARTCLK = UXCLK * R / (N * 2) */ ++ u32 r = val & 0xff; ++ u32 n = (val >> 8) & 0x3ff; ++ ++ mult = r; ++ div = n * 2; ++ ++ return clk_hw_register_fixed_factor(NULL, name, "ast1700-uxclk", 0, mult, div); ++}; ++ ++static struct clk_hw *AST1700_calc_huclk(const char *name, u32 val) ++{ ++ unsigned int mult, div; ++ ++ /* UARTCLK = UXCLK * R / (N * 2) */ ++ u32 r = val & 0xff; ++ u32 n = (val >> 8) & 0x3ff; ++ ++ mult = r; ++ div = n * 2; ++ ++ return clk_hw_register_fixed_factor(NULL, name, "ast1700-huxclk", 0, mult, div); ++}; ++ ++static struct clk_hw *AST1700_calc_pll(const char *name, const char *parent_name, u32 val) ++{ ++ unsigned int mult, div; ++ ++ if (val & BIT(24)) { ++ /* Pass through mode */ ++ mult = 1; ++ div = 1; ++ } else { ++ /* F = 25Mhz * [(M + 1) / (n + 1)] / (p + 1) */ ++ u32 m = val & 0x1fff; ++ u32 n = (val >> 13) & 0x3f; ++ u32 p = (val >> 19) & 0xf; ++ ++ mult = (m + 1) / (n + 1); ++ div = (p + 1); ++ } ++ return clk_hw_register_fixed_factor(NULL, name, parent_name, 0, mult, div); ++} ++ ++static int AST1700_clk_is_enabled(struct clk_hw *hw) ++{ ++ struct clk_gate *gate = to_clk_gate(hw); ++ u32 clk = BIT(gate->bit_idx); ++ u32 reg; ++ ++ reg = readl(gate->reg); ++ ++ return !(reg & clk); ++} ++ ++static int AST1700_clk_enable(struct clk_hw *hw) ++{ ++ struct clk_gate *gate = to_clk_gate(hw); ++ u32 clk = BIT(gate->bit_idx); ++ ++ if (readl(gate->reg) & clk) ++ writel(clk, gate->reg + 0x04); ++ ++ return 0; ++} ++ ++static void AST1700_clk_disable(struct clk_hw *hw) ++{ ++ struct clk_gate *gate = to_clk_gate(hw); ++ u32 clk = BIT(gate->bit_idx); ++ ++ /* Clock is set to enable, so use write to set register */ ++ writel(clk, gate->reg); ++} ++ ++static const struct clk_ops AST1700_clk_gate_ops = { ++ .enable = AST1700_clk_enable, ++ .disable = AST1700_clk_disable, ++ .is_enabled = AST1700_clk_is_enabled, ++}; ++ ++static struct clk_hw 
*AST1700_clk_hw_register_gate(struct device *dev, const char *name, ++ const char *parent_name, unsigned long flags, ++ void __iomem *reg, u8 clock_idx, ++ u8 clk_gate_flags, spinlock_t *lock) ++{ ++ struct clk_gate *gate; ++ struct clk_hw *hw; ++ struct clk_init_data init; ++ int ret = -EINVAL; ++ ++ gate = kzalloc(sizeof(*gate), GFP_KERNEL); ++ if (!gate) ++ return ERR_PTR(-ENOMEM); ++ ++ init.name = name; ++ init.ops = &AST1700_clk_gate_ops; ++ init.flags = flags; ++ init.parent_names = parent_name ? &parent_name : NULL; ++ init.num_parents = parent_name ? 1 : 0; ++ ++ gate->reg = reg; ++ gate->bit_idx = clock_idx; ++ gate->flags = clk_gate_flags; ++ gate->lock = lock; ++ gate->hw.init = &init; ++ ++ hw = &gate->hw; ++ ret = clk_hw_register(dev, hw); ++ if (ret) { ++ kfree(gate); ++ hw = ERR_PTR(ret); ++ } ++ ++ return hw; ++} ++ ++struct ast1700_reset { ++ void __iomem *base; ++ struct reset_controller_dev rcdev; ++}; ++ ++#define to_rc_data(p) container_of(p, struct ast1700_reset, rcdev) ++ ++static int ast1700_reset_assert(struct reset_controller_dev *rcdev, unsigned long id) ++{ ++ struct ast1700_reset *rc = to_rc_data(rcdev); ++ u32 rst = BIT(id % 32); ++ u32 reg = id >= 32 ? 0x220 : 0x200; ++ ++ writel(rst, rc->base + reg); ++ return 0; ++} ++ ++static int ast1700_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id) ++{ ++ struct ast1700_reset *rc = to_rc_data(rcdev); ++ u32 rst = BIT(id % 32); ++ u32 reg = id >= 32 ? 0x220 : 0x200; ++ ++ /* Use set to clear register */ ++ writel(rst, rc->base + reg + 0x04); ++ return 0; ++} ++ ++static int ast1700_reset_status(struct reset_controller_dev *rcdev, unsigned long id) ++{ ++ struct ast1700_reset *rc = to_rc_data(rcdev); ++ u32 rst = BIT(id % 32); ++ u32 reg = id >= 32 ? 0x220 : 0x200; ++ ++ return (readl(rc->base + reg) & rst); ++} ++ ++static const struct reset_control_ops ast1700_reset_ops = { ++ .assert = ast1700_reset_assert, ++ .deassert = ast1700_reset_deassert, ++ .status = ast1700_reset_status, ++}; ++ ++static const char *const sdclk_sel0[] = { ++ "ast1700_0-hpll_divn", ++ "ast1700_0-apll_divn", ++}; ++ ++static const char *const sdclk_sel1[] = { ++ "ast1700_1-hpll_divn", ++ "ast1700_1-apll_divn", ++}; ++ ++static const char *const uartclk_sel0[] = { ++ "ast1700_0-uartxclk", ++ "ast1700_0-huartxclk", ++}; ++ ++static const char *const uartclk_sel1[] = { ++ "ast1700_1-uartxclk", ++ "ast1700_1-huartxclk", ++}; ++ ++static const char *const uxclk_sel0[] = { ++ "ast1700_0-apll_div4", ++ "ast1700_0-apll_div2", ++ "ast1700_0-apll", ++ "ast1700_0-hpll", ++}; ++ ++static const char *const uxclk_sel1[] = { ++ "ast1700_1-apll_div4", ++ "ast1700_1-apll_div2", ++ "ast1700_1-apll", ++ "ast1700_1-hpll", ++}; ++ ++#define CREATE_CLK_NAME(id, suffix) kasprintf(GFP_KERNEL, "ast1700_%d-%s", id, suffix) ++ ++static int AST1700_clk_init(struct device_node *ast1700_node) ++{ ++ struct clk_hw_onecell_data *clk_data; ++ struct ast1700_reset *reset; ++ u32 uart_clk_source = 0; ++ void __iomem *clk_base; ++ struct clk_hw **clks; ++ struct clk_hw *hw; ++ u32 val; ++ int ret; ++ ++ int id = ida_simple_get(&ast1700_clk_ida, 0, 0, GFP_KERNEL); ++ ++ clk_base = of_iomap(ast1700_node, 0); ++ WARN_ON(!clk_base); ++ ++ clk_data = kzalloc(struct_size(clk_data, hws, AST1700_NUM_CLKS), GFP_KERNEL); ++ if (!clk_data) ++ return -ENOMEM; ++ ++ clk_data->num = AST1700_NUM_CLKS; ++ clks = clk_data->hws; ++ ++ reset = kzalloc(sizeof(*reset), GFP_KERNEL); ++ if (!reset) ++ return -ENOMEM; ++ ++ reset->base = clk_base; ++ ++ reset->rcdev.owner = 
THIS_MODULE; ++ reset->rcdev.nr_resets = AST1700_RESET_NUMS; ++ reset->rcdev.ops = &ast1700_reset_ops; ++ reset->rcdev.of_node = ast1700_node; ++ ++ ret = reset_controller_register(&reset->rcdev); ++ if (ret) { ++ pr_err("soc1 failed to register reset controller\n"); ++ return ret; ++ } ++ /* ++ * Ast1700 A0 workaround: ++ * I3C reset should assert all of the I3C controllers simultaneously. ++ * Otherwise, it may lead to failure in accessing I3C registers. ++ */ ++ if (!(readl(clk_base) & BIT(16))) { ++ for (int i = AST1700_RESET_I3C0; i <= AST1700_RESET_I3C15; i++) ++ ast1700_reset_assert(&reset->rcdev, i); ++ } ++ ++ hw = clk_hw_register_fixed_rate(NULL, CREATE_CLK_NAME(id, "clkin"), NULL, 0, AST1700_CLK_25MHZ); ++ if (IS_ERR(hw)) ++ return PTR_ERR(hw); ++ clks[AST1700_CLKIN] = hw; ++ ++ /* HPLL 1000Mhz */ ++ val = readl(clk_base + AST1700_HPLL_PARAM); ++ clks[AST1700_CLK_HPLL] = AST1700_calc_pll(CREATE_CLK_NAME(id, "hpll"), CREATE_CLK_NAME(id, "clkin"), val); ++ ++ /* HPLL 800Mhz */ ++ val = readl(clk_base + AST1700_APLL_PARAM); ++ clks[AST1700_CLK_APLL] = AST1700_calc_pll(CREATE_CLK_NAME(id, "apll"), CREATE_CLK_NAME(id, "clkin"), val); ++ ++ clks[AST1700_CLK_APLL_DIV2] = ++ clk_hw_register_fixed_factor(NULL, CREATE_CLK_NAME(id, "apll_div2"), CREATE_CLK_NAME(id, "apll"), 0, 1, 2); ++ ++ clks[AST1700_CLK_APLL_DIV4] = ++ clk_hw_register_fixed_factor(NULL, CREATE_CLK_NAME(id, "apll_div4"), CREATE_CLK_NAME(id, "apll"), 0, 1, 4); ++ ++ val = readl(clk_base + AST1700_DPLL_PARAM); ++ clks[AST1700_CLK_DPLL] = AST1700_calc_pll(CREATE_CLK_NAME(id, "dpll"), CREATE_CLK_NAME(id, "clkin"), val); ++ ++ /* uxclk mux selection */ ++ clks[AST1700_CLK_UXCLK] = ++ clk_hw_register_mux(NULL, CREATE_CLK_NAME(id, "uxclk"), ++ (id == 0) ? uxclk_sel0 : uxclk_sel1, ++ (id == 0) ? ARRAY_SIZE(uxclk_sel0) : ARRAY_SIZE(uxclk_sel1), ++ 0, clk_base + AST1700_CLK_SEL2, ++ 0, 2, 0, &ast1700_clk_lock); ++ ++ val = readl(clk_base + AST1700_UXCLK_CTRL); ++ clks[AST1700_CLK_UARTX] = AST1700_calc_uclk(CREATE_CLK_NAME(id, "uartxclk"), val); ++ ++ /* huxclk mux selection */ ++ clks[AST1700_CLK_HUXCLK] = ++ clk_hw_register_mux(NULL, CREATE_CLK_NAME(id, "huxclk"), ++ (id == 0) ? uxclk_sel0 : uxclk_sel1, ++ (id == 0) ? 
ARRAY_SIZE(uxclk_sel0) : ARRAY_SIZE(uxclk_sel1), ++ 0, clk_base + AST1700_CLK_SEL2, ++ 3, 2, 0, &ast1700_clk_lock); ++ ++ val = readl(clk_base + AST1700_HUXCLK_CTRL); ++ clks[AST1700_CLK_HUARTX] = AST1700_calc_huclk(CREATE_CLK_NAME(id, "huartxclk"), val); ++ ++ /* AHB CLK = 200Mhz */ ++ clks[AST1700_CLK_AHB] = ++ clk_hw_register_divider_table(NULL, CREATE_CLK_NAME(id, "ahb"), ++ CREATE_CLK_NAME(id, "hpll"), ++ 0, clk_base + AST1700_CLK_SEL2, ++ 20, 3, 0, ast1700_clk_div_table, &ast1700_clk_lock); ++ ++ /* APB CLK = 100Mhz */ ++ clks[AST1700_CLK_APB] = ++ clk_hw_register_divider_table(NULL, CREATE_CLK_NAME(id, "apb"), ++ CREATE_CLK_NAME(id, "hpll"), ++ 0, clk_base + AST1700_CLK_SEL1, ++ 18, 3, 0, ast1700_clk_div_table2, &ast1700_clk_lock); ++ ++ //rmii ++ clks[AST1700_CLK_RMII] = ++ clk_hw_register_divider_table(NULL, CREATE_CLK_NAME(id, "rmii"), ++ CREATE_CLK_NAME(id, "hpll"), ++ 0, clk_base + AST1700_CLK_SEL2, ++ 21, 3, 0, ast1700_rmii_div_table, &ast1700_clk_lock); ++ ++ //rgmii ++ clks[AST1700_CLK_RGMII] = ++ clk_hw_register_divider_table(NULL, CREATE_CLK_NAME(id, "rgmii"), ++ CREATE_CLK_NAME(id, "hpll"), ++ 0, clk_base + AST1700_CLK_SEL2, ++ 25, 3, 0, ast1700_rgmii_div_table, &ast1700_clk_lock); ++ ++ //mac hclk ++ clks[AST1700_CLK_MACHCLK] = ++ clk_hw_register_divider_table(NULL, CREATE_CLK_NAME(id, "machclk"), ++ CREATE_CLK_NAME(id, "hpll"), ++ 0, clk_base + AST1700_CLK_SEL2, ++ 29, 3, 0, ast1700_clk_div_table, &ast1700_clk_lock); ++ ++ clks[AST1700_CLK_GATE_LCLK0] = ++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "lclk0-gate"), NULL, ++ CLK_IS_CRITICAL, clk_base + AST1700_CLK_STOP, ++ 0, 0, &ast1700_clk_lock); ++ ++ clks[AST1700_CLK_GATE_LCLK0] = ++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "lclk1-gate"), NULL, ++ CLK_IS_CRITICAL, clk_base + AST1700_CLK_STOP, ++ 1, 0, &ast1700_clk_lock); ++ ++ clks[AST1700_CLK_GATE_ESPI0CLK] = ++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "espi0clk-gate"), NULL, ++ CLK_IS_CRITICAL, clk_base + AST1700_CLK_STOP, ++ 2, 0, &ast1700_clk_lock); ++ ++ clks[AST1700_CLK_GATE_ESPI1CLK] = ++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "espi1clk-gate"), NULL, ++ CLK_IS_CRITICAL, clk_base + AST1700_CLK_STOP, ++ 3, 0, &ast1700_clk_lock); ++ ++ //sd pll divn ++ clks[AST1700_CLK_HPLL_DIVN] = ++ clk_hw_register_divider_table(NULL, CREATE_CLK_NAME(id, "hpll_divn"), ++ CREATE_CLK_NAME(id, "hpll"), ++ 0, clk_base + AST1700_CLK_SEL2, ++ 20, 3, 0, ast1700_clk_div_table, &ast1700_clk_lock); ++ ++ clks[AST1700_CLK_APLL_DIVN] = ++ clk_hw_register_divider_table(NULL, CREATE_CLK_NAME(id, "apll_divn"), ++ CREATE_CLK_NAME(id, "apll"), ++ 0, clk_base + AST1700_CLK_SEL2, ++ 8, 3, 0, ast1700_clk_div_table, &ast1700_clk_lock); ++ ++ //sd clk ++ clks[AST1700_CLK_SDCLK] = ++ clk_hw_register_mux(NULL, CREATE_CLK_NAME(id, "sdclk"), ++ (id == 0) ? sdclk_sel0 : sdclk_sel1, ++ (id == 0) ? 
ARRAY_SIZE(sdclk_sel0) : ARRAY_SIZE(sdclk_sel1), ++ 0, clk_base + AST1700_CLK_SEL1, ++ 13, 1, 0, &ast1700_clk_lock); ++ ++ clks[AST1700_CLK_GATE_SDCLK] = ++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "sdclk-gate"), ++ CREATE_CLK_NAME(id, "sdclk"), ++ 0, clk_base + AST1700_CLK_STOP, ++ 4, 0, &ast1700_clk_lock); ++ ++ clks[AST1700_CLK_GATE_REFCLK] = ++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "io-refclk-gate"), NULL, ++ CLK_IS_CRITICAL, clk_base + AST1700_CLK_STOP, ++ 6, 0, &ast1700_clk_lock); ++ ++ clks[AST1700_CLK_GATE_LPCHCLK] = ++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "lpchclk-gate"), NULL, ++ CLK_IS_CRITICAL, clk_base + AST1700_CLK_STOP, ++ 7, 0, &ast1700_clk_lock); ++ ++ clks[AST1700_CLK_GATE_MAC0CLK] = ++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "mac0clk-gate"), NULL, ++ 0, clk_base + AST1700_CLK_STOP, ++ 8, 0, &ast1700_clk_lock); ++ ++ clks[AST1700_CLK_GATE_MAC1CLK] = ++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "mac1clk-gate"), NULL, ++ 0, clk_base + AST1700_CLK_STOP, ++ 9, 0, &ast1700_clk_lock); ++ ++ clks[AST1700_CLK_GATE_MAC2CLK] = ++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "mac2clk-gate"), NULL, ++ 0, clk_base + AST1700_CLK_STOP, ++ 10, 0, &ast1700_clk_lock); ++ ++ of_property_read_u32(ast1700_node, "uart-clk-source", &uart_clk_source); ++ ++ if (uart_clk_source) { ++ val = readl(clk_base + AST1700_CLK_SEL1) & ~GENMASK(12, 0); ++ uart_clk_source &= GENMASK(12, 0); ++ writel(val | uart_clk_source, clk_base + AST1700_CLK_SEL1); ++ } ++ ++ //UART0 ++ clks[AST1700_CLK_UART0] = ++ clk_hw_register_mux(NULL, CREATE_CLK_NAME(id, "uart0clk"), ++ (id == 0) ? uartclk_sel0 : uartclk_sel1, ++ (id == 0) ? ARRAY_SIZE(uartclk_sel0) : ARRAY_SIZE(uartclk_sel1), ++ 0, clk_base + AST1700_CLK_SEL1, ++ 0, 1, 0, &ast1700_clk_lock); ++ ++ clks[AST1700_CLK_GATE_UART0CLK] = ++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "uart0clk-gate"), ++ CREATE_CLK_NAME(id, "uart0clk"), ++ 0, clk_base + AST1700_CLK_STOP, ++ 11, 0, &ast1700_clk_lock); ++ ++ //UART1 ++ clks[AST1700_CLK_UART1] = ++ clk_hw_register_mux(NULL, CREATE_CLK_NAME(id, "uart1clk"), ++ (id == 0) ? uartclk_sel0 : uartclk_sel1, ++ (id == 0) ? ARRAY_SIZE(uartclk_sel0) : ARRAY_SIZE(uartclk_sel1), ++ 0, clk_base + AST1700_CLK_SEL1, ++ 1, 1, 0, &ast1700_clk_lock); ++ ++ clks[AST1700_CLK_GATE_UART1CLK] = ++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "uart1clk-gate"), ++ CREATE_CLK_NAME(id, "uart1clk"), ++ 0, clk_base + AST1700_CLK_STOP, ++ 12, 0, &ast1700_clk_lock); ++ ++ //UART2 ++ clks[AST1700_CLK_UART2] = ++ clk_hw_register_mux(NULL, CREATE_CLK_NAME(id, "uart2clk"), ++ (id == 0) ? uartclk_sel0 : uartclk_sel1, ++ (id == 0) ? ARRAY_SIZE(uartclk_sel0) : ARRAY_SIZE(uartclk_sel1), ++ 0, clk_base + AST1700_CLK_SEL1, ++ 2, 1, 0, &ast1700_clk_lock); ++ ++ clks[AST1700_CLK_GATE_UART2CLK] = ++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "uart2clk-gate"), ++ CREATE_CLK_NAME(id, "uart2clk"), ++ 0, clk_base + AST1700_CLK_STOP, ++ 13, 0, &ast1700_clk_lock); ++ ++ //UART3 ++ clks[AST1700_CLK_UART3] = ++ clk_hw_register_mux(NULL, CREATE_CLK_NAME(id, "uart3clk"), ++ (id == 0) ? uartclk_sel0 : uartclk_sel1, ++ (id == 0) ? 
ARRAY_SIZE(uartclk_sel0) : ARRAY_SIZE(uartclk_sel1), ++ 0, clk_base + AST1700_CLK_SEL1, ++ 3, 1, 0, &ast1700_clk_lock); ++ ++ clks[AST1700_CLK_GATE_UART3CLK] = ++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "uart3clk-gate"), ++ CREATE_CLK_NAME(id, "uart3clk"), ++ 0, clk_base + AST1700_CLK_STOP, ++ 14, 0, &ast1700_clk_lock); ++ ++ clks[AST1700_CLK_GATE_I3C0CLK] = ++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "i3c0clk-gate"), ++ CREATE_CLK_NAME(id, "ahb"), ++ 0, clk_base + AST1700_CLK_STOP, ++ 16, 0, &ast1700_clk_lock); ++ ++ clks[AST1700_CLK_GATE_I3C1CLK] = ++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "i3c1clk-gate"), ++ CREATE_CLK_NAME(id, "ahb"), ++ 0, clk_base + AST1700_CLK_STOP, ++ 17, 0, &ast1700_clk_lock); ++ ++ clks[AST1700_CLK_GATE_I3C2CLK] = ++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "i3c2clk-gate"), ++ CREATE_CLK_NAME(id, "ahb"), ++ 0, clk_base + AST1700_CLK_STOP, ++ 18, 0, &ast1700_clk_lock); ++ ++ clks[AST1700_CLK_GATE_I3C3CLK] = ++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "i3c3clk-gate"), ++ CREATE_CLK_NAME(id, "ahb"), ++ 0, clk_base + AST1700_CLK_STOP, ++ 19, 0, &ast1700_clk_lock); ++ ++ clks[AST1700_CLK_GATE_I3C4CLK] = ++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "i3c4clk-gate"), ++ CREATE_CLK_NAME(id, "ahb"), ++ 0, clk_base + AST1700_CLK_STOP, ++ 20, 0, &ast1700_clk_lock); ++ ++ clks[AST1700_CLK_GATE_I3C5CLK] = ++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "i3c5clk-gate"), ++ CREATE_CLK_NAME(id, "ahb"), ++ 0, clk_base + AST1700_CLK_STOP, ++ 21, 0, &ast1700_clk_lock); ++ ++ clks[AST1700_CLK_GATE_I3C6CLK] = ++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "i3c6clk-gate"), ++ CREATE_CLK_NAME(id, "ahb"), ++ 0, clk_base + AST1700_CLK_STOP, ++ 22, 0, &ast1700_clk_lock); ++ ++ clks[AST1700_CLK_GATE_I3C7CLK] = ++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "i3c7clk-gate"), ++ CREATE_CLK_NAME(id, "ahb"), ++ 0, clk_base + AST1700_CLK_STOP, ++ 23, 0, &ast1700_clk_lock); ++ ++ clks[AST1700_CLK_GATE_I3C8CLK] = ++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "i3c8clk-gate"), ++ CREATE_CLK_NAME(id, "ahb"), ++ 0, clk_base + AST1700_CLK_STOP, ++ 24, 0, &ast1700_clk_lock); ++ ++ clks[AST1700_CLK_GATE_I3C9CLK] = ++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "i3c9clk-gate"), ++ CREATE_CLK_NAME(id, "ahb"), ++ 0, clk_base + AST1700_CLK_STOP, ++ 25, 0, &ast1700_clk_lock); ++ ++ clks[AST1700_CLK_GATE_I3C10CLK] = ++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "i3c10clk-gate"), ++ CREATE_CLK_NAME(id, "ahb"), ++ 0, clk_base + AST1700_CLK_STOP, ++ 26, 0, &ast1700_clk_lock); ++ ++ clks[AST1700_CLK_GATE_I3C11CLK] = ++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "i3c11clk-gate"), ++ CREATE_CLK_NAME(id, "ahb"), ++ 0, clk_base + AST1700_CLK_STOP, ++ 27, 0, &ast1700_clk_lock); ++ ++ clks[AST1700_CLK_GATE_I3C12CLK] = ++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "i3c12clk-gate"), ++ CREATE_CLK_NAME(id, "ahb"), ++ 0, clk_base + AST1700_CLK_STOP, ++ 28, 0, &ast1700_clk_lock); ++ ++ clks[AST1700_CLK_GATE_I3C13CLK] = ++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "i3c13clk-gate"), ++ CREATE_CLK_NAME(id, "ahb"), ++ 0, clk_base + AST1700_CLK_STOP, ++ 29, 0, &ast1700_clk_lock); ++ ++ clks[AST1700_CLK_GATE_I3C14CLK] = ++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "i3c14clk-gate"), ++ CREATE_CLK_NAME(id, "ahb"), ++ 0, clk_base + AST1700_CLK_STOP, ++ 30, 0, &ast1700_clk_lock); ++ ++ clks[AST1700_CLK_GATE_I3C15CLK] = ++ 
AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "i3c15clk-gate"), ++ CREATE_CLK_NAME(id, "ahb"), ++ 0, clk_base + AST1700_CLK_STOP, ++ 31, 0, &ast1700_clk_lock); ++ ++ /*clk stop 2 */ ++ //UART5 ++ clks[AST1700_CLK_UART5] = ++ clk_hw_register_mux(NULL, CREATE_CLK_NAME(id, "uart5clk"), ++ (id == 0) ? uartclk_sel0 : uartclk_sel1, ++ (id == 0) ? ARRAY_SIZE(uartclk_sel0) : ARRAY_SIZE(uartclk_sel1), ++ 0, clk_base + AST1700_CLK_SEL1, ++ 5, 1, 0, &ast1700_clk_lock); ++ ++ clks[AST1700_CLK_GATE_UART5CLK] = ++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "uart5clk-gate"), ++ CREATE_CLK_NAME(id, "uart5clk"), ++ 0, clk_base + AST1700_CLK_STOP2, ++ 0, 0, &ast1700_clk_lock); ++ ++ //UART6 ++ clks[AST1700_CLK_UART6] = ++ clk_hw_register_mux(NULL, CREATE_CLK_NAME(id, "uart6clk"), ++ (id == 0) ? uartclk_sel0 : uartclk_sel1, ++ (id == 0) ? ARRAY_SIZE(uartclk_sel0) : ARRAY_SIZE(uartclk_sel1), ++ 0, clk_base + AST1700_CLK_SEL1, ++ 6, 1, 0, &ast1700_clk_lock); ++ ++ clks[AST1700_CLK_GATE_UART6CLK] = ++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "uart6clk-gate"), ++ CREATE_CLK_NAME(id, "uart6clk"), ++ 0, clk_base + AST1700_CLK_STOP2, ++ 1, 0, &ast1700_clk_lock); ++ ++ //UART7 ++ clks[AST1700_CLK_UART7] = ++ clk_hw_register_mux(NULL, CREATE_CLK_NAME(id, "uart7clk"), ++ (id == 0) ? uartclk_sel0 : uartclk_sel1, ++ (id == 0) ? ARRAY_SIZE(uartclk_sel0) : ARRAY_SIZE(uartclk_sel1), ++ 0, clk_base + AST1700_CLK_SEL1, ++ 7, 1, 0, &ast1700_clk_lock); ++ ++ clks[AST1700_CLK_GATE_UART7CLK] = ++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "uart7clk-gate"), ++ CREATE_CLK_NAME(id, "uart7clk"), ++ 0, clk_base + AST1700_CLK_STOP2, ++ 2, 0, &ast1700_clk_lock); ++ ++ //UART8 ++ clks[AST1700_CLK_UART8] = ++ clk_hw_register_mux(NULL, CREATE_CLK_NAME(id, "uart8clk"), ++ (id == 0) ? uartclk_sel0 : uartclk_sel1, ++ (id == 0) ? ARRAY_SIZE(uartclk_sel0) : ARRAY_SIZE(uartclk_sel1), ++ 0, clk_base + AST1700_CLK_SEL1, ++ 8, 1, 0, &ast1700_clk_lock); ++ ++ clks[AST1700_CLK_GATE_UART8CLK] = ++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "uart8clk-gate"), ++ CREATE_CLK_NAME(id, "uart8clk"), ++ 0, clk_base + AST1700_CLK_STOP2, ++ 3, 0, &ast1700_clk_lock); ++ ++ //UART9 ++ clks[AST1700_CLK_UART9] = ++ clk_hw_register_mux(NULL, CREATE_CLK_NAME(id, "uart9clk"), ++ (id == 0) ? uartclk_sel0 : uartclk_sel1, ++ (id == 0) ? ARRAY_SIZE(uartclk_sel0) : ARRAY_SIZE(uartclk_sel1), ++ 0, clk_base + AST1700_CLK_SEL1, ++ 9, 1, 0, &ast1700_clk_lock); ++ ++ clks[AST1700_CLK_GATE_UART9CLK] = ++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "uart9clk-gate"), ++ CREATE_CLK_NAME(id, "uart9clk"), ++ 0, clk_base + AST1700_CLK_STOP2, ++ 4, 0, &ast1700_clk_lock); ++ ++ //UART10 ++ clks[AST1700_CLK_UART10] = ++ clk_hw_register_mux(NULL, CREATE_CLK_NAME(id, "uart10clk"), ++ (id == 0) ? uartclk_sel0 : uartclk_sel1, ++ (id == 0) ? ARRAY_SIZE(uartclk_sel0) : ARRAY_SIZE(uartclk_sel1), ++ 0, clk_base + AST1700_CLK_SEL1, ++ 10, 1, 0, &ast1700_clk_lock); ++ ++ clks[AST1700_CLK_GATE_UART10CLK] = ++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "uart10clk-gate"), ++ CREATE_CLK_NAME(id, "uart10clk"), ++ 0, clk_base + AST1700_CLK_STOP2, ++ 5, 0, &ast1700_clk_lock); ++ ++ //UART11 ++ clks[AST1700_CLK_UART11] = ++ clk_hw_register_mux(NULL, CREATE_CLK_NAME(id, "uart11clk"), ++ (id == 0) ? uartclk_sel0 : uartclk_sel1, ++ (id == 0) ? 
ARRAY_SIZE(uartclk_sel0) : ARRAY_SIZE(uartclk_sel1), ++ 0, clk_base + AST1700_CLK_SEL1, ++ 11, 1, 0, &ast1700_clk_lock); ++ ++ clks[AST1700_CLK_GATE_UART11CLK] = ++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "uart11clk-gate"), ++ CREATE_CLK_NAME(id, "uart11clk"), ++ 0, clk_base + AST1700_CLK_STOP2, ++ 6, 0, &ast1700_clk_lock); ++ ++ //uart12: call bmc uart ++ clks[AST1700_CLK_UART12] = ++ clk_hw_register_mux(NULL, CREATE_CLK_NAME(id, "uart12clk"), ++ (id == 0) ? uartclk_sel0 : uartclk_sel1, ++ (id == 0) ? ARRAY_SIZE(uartclk_sel0) : ARRAY_SIZE(uartclk_sel1), ++ 0, clk_base + AST1700_CLK_SEL1, ++ 12, 1, 0, &ast1700_clk_lock); ++ ++ clks[AST1700_CLK_GATE_UART12CLK] = ++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "uart12clk-gate"), ++ CREATE_CLK_NAME(id, "uart12clk"), ++ 0, clk_base + AST1700_CLK_STOP2, ++ 7, 0, &ast1700_clk_lock); ++ ++ clks[AST1700_CLK_GATE_FSICLK] = ++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "fsiclk-gate"), NULL, ++ 0, clk_base + AST1700_CLK_STOP2, ++ 8, 0, &ast1700_clk_lock); ++ ++ clks[AST1700_CLK_GATE_LTPIPHYCLK] = ++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "ltpiphyclk-gate"), NULL, ++ CLK_IS_CRITICAL, clk_base + AST1700_CLK_STOP2, ++ 9, 0, &ast1700_clk_lock); ++ ++ clks[AST1700_CLK_GATE_LTPICLK] = ++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "ltpiclk-gate"), NULL, ++ CLK_IS_CRITICAL, clk_base + AST1700_CLK_STOP2, ++ 10, 0, &ast1700_clk_lock); ++ ++ clks[AST1700_CLK_GATE_VGALCLK] = ++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "vgalclk-gate"), NULL, ++ 0, clk_base + AST1700_CLK_STOP2, ++ 11, 0, &ast1700_clk_lock); ++ ++ clks[AST1700_CLK_GATE_USBUARTCLK] = ++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "usbuartclk-gate"), NULL, ++ 0, clk_base + AST1700_CLK_STOP2, ++ 12, 0, &ast1700_clk_lock); ++ ++ clk_hw_register_fixed_factor(NULL, CREATE_CLK_NAME(id, "canclk"), CREATE_CLK_NAME(id, "apll"), 0, 1, 10); ++ ++ clks[AST1700_CLK_GATE_CANCLK] = ++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "canclk-gate"), ++ CREATE_CLK_NAME(id, "canclk"), ++ 0, clk_base + AST1700_CLK_STOP2, ++ 13, 0, &ast1700_clk_lock); ++ ++ clks[AST1700_CLK_GATE_PCICLK] = ++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "pciclk-gate"), NULL, ++ 0, clk_base + AST1700_CLK_STOP2, ++ 14, 0, &ast1700_clk_lock); ++ ++ clks[AST1700_CLK_GATE_SLICLK] = ++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "sliclk-gate"), NULL, ++ 0, clk_base + AST1700_CLK_STOP2, ++ 15, 0, &ast1700_clk_lock); ++ ++ of_clk_add_hw_provider(ast1700_node, of_clk_hw_onecell_get, clk_data); ++ ++ return 0; ++}; ++ ++CLK_OF_DECLARE_DRIVER(ast1700, "aspeed,ast1700-scu", AST1700_clk_init); ++ +diff --git a/drivers/clk/clk-ast2600.c b/drivers/clk/clk-ast2600.c +--- a/drivers/clk/clk-ast2600.c 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/clk/clk-ast2600.c 2025-12-23 10:16:20.661040429 +0000 +@@ -19,7 +19,7 @@ + * This includes the gates (configured from aspeed_g6_gates), plus the + * explicitly-configured clocks (ASPEED_CLK_HPLL and up). 
+ */ +-#define ASPEED_G6_NUM_CLKS 73 ++#define ASPEED_G6_NUM_CLKS 76 + + #define ASPEED_G6_SILICON_REV 0x014 + #define CHIP_REVISION_ID GENMASK(23, 16) +@@ -59,8 +59,24 @@ + + #define ASPEED_G6_STRAP1 0x500 + ++#define ASPEED_UARTCLK_FROM_UXCLK 0x338 ++ + #define ASPEED_MAC12_CLK_DLY 0x340 ++#define ASPEED_MAC12_CLK_DLY_100M 0x348 ++#define ASPEED_MAC12_CLK_DLY_10M 0x34C ++ + #define ASPEED_MAC34_CLK_DLY 0x350 ++#define ASPEED_MAC34_CLK_DLY_100M 0x358 ++#define ASPEED_MAC34_CLK_DLY_10M 0x35C ++ ++#define ASPEED_G6_MAC34_DRIVING_CTRL 0x458 ++ ++#define ASPEED_G6_DEF_MAC12_DELAY_1G 0x0028a410 ++#define ASPEED_G6_DEF_MAC12_DELAY_100M 0x00410410 ++#define ASPEED_G6_DEF_MAC12_DELAY_10M 0x00410410 ++#define ASPEED_G6_DEF_MAC34_DELAY_1G 0x00104208 ++#define ASPEED_G6_DEF_MAC34_DELAY_100M 0x00104208 ++#define ASPEED_G6_DEF_MAC34_DELAY_10M 0x00104208 + + /* Globally visible clocks */ + static DEFINE_SPINLOCK(aspeed_g6_clk_lock); +@@ -72,6 +88,45 @@ + /* AST2600 revision: A0, A1, A2, etc */ + static u8 soc_rev; + ++struct mac_delay_config { ++ u32 tx_delay_1000; ++ u32 rx_delay_1000; ++ u32 tx_delay_100; ++ u32 rx_delay_100; ++ u32 tx_delay_10; ++ u32 rx_delay_10; ++}; ++ ++union mac_delay_1g { ++ u32 w; ++ struct { ++ unsigned int tx_delay_1 : 6; /* bit[5:0] */ ++ unsigned int tx_delay_2 : 6; /* bit[11:6] */ ++ unsigned int rx_delay_1 : 6; /* bit[17:12] */ ++ unsigned int rx_delay_2 : 6; /* bit[23:18] */ ++ unsigned int rx_clk_inv_1 : 1; /* bit[24] */ ++ unsigned int rx_clk_inv_2 : 1; /* bit[25] */ ++ unsigned int rmii_tx_data_at_falling_1 : 1; /* bit[26] */ ++ unsigned int rmii_tx_data_at_falling_2 : 1; /* bit[27] */ ++ unsigned int rgmiick_pad_dir : 1; /* bit[28] */ ++ unsigned int rmii_50m_oe_1 : 1; /* bit[29] */ ++ unsigned int rmii_50m_oe_2 : 1; /* bit[30] */ ++ unsigned int rgmii_125m_o_sel : 1; /* bit[31] */ ++ } b; ++}; ++ ++union mac_delay_100_10 { ++ u32 w; ++ struct { ++ unsigned int tx_delay_1 : 6; /* bit[5:0] */ ++ unsigned int tx_delay_2 : 6; /* bit[11:6] */ ++ unsigned int rx_delay_1 : 6; /* bit[17:12] */ ++ unsigned int rx_delay_2 : 6; /* bit[23:18] */ ++ unsigned int rx_clk_inv_1 : 1; /* bit[24] */ ++ unsigned int rx_clk_inv_2 : 1; /* bit[25] */ ++ unsigned int reserved_0 : 6; /* bit[31:26] */ ++ } b; ++}; + /* + * The majority of the clocks in the system are gates paired with a reset + * controller that holds the IP in reset; this is represented by the @reset_idx +@@ -99,14 +154,14 @@ + * ref0 and ref1 are essential for the SoC to operate + * mpll is required if SDRAM is used + */ +-static const struct aspeed_gate_data aspeed_g6_gates[] = { ++static struct aspeed_gate_data aspeed_g6_gates[] = { + /* clk rst name parent flags */ + [ASPEED_CLK_GATE_MCLK] = { 0, -1, "mclk-gate", "mpll", CLK_IS_CRITICAL }, /* SDRAM */ + [ASPEED_CLK_GATE_ECLK] = { 1, 6, "eclk-gate", "eclk", 0 }, /* Video Engine */ + [ASPEED_CLK_GATE_GCLK] = { 2, 7, "gclk-gate", NULL, 0 }, /* 2D engine */ + /* vclk parent - dclk/d1clk/hclk/mclk */ + [ASPEED_CLK_GATE_VCLK] = { 3, -1, "vclk-gate", NULL, 0 }, /* Video Capture */ +- [ASPEED_CLK_GATE_BCLK] = { 4, 8, "bclk-gate", "bclk", 0 }, /* PCIe/PCI */ ++ [ASPEED_CLK_GATE_BCLK] = { 4, 8, "bclk-gate", "bclk", CLK_IS_CRITICAL }, /* PCIe/PCI */ + /* From dpll */ + [ASPEED_CLK_GATE_DCLK] = { 5, -1, "dclk-gate", NULL, CLK_IS_CRITICAL }, /* DAC */ + [ASPEED_CLK_GATE_REF0CLK] = { 6, -1, "ref0clk-gate", "clkin", CLK_IS_CRITICAL }, +@@ -128,8 +183,8 @@ + /* Reserved 26 */ + [ASPEED_CLK_GATE_EMMCCLK] = { 27, 16, "emmcclk-gate", NULL, 0 }, /* For card clk */ + /* Reserved 28/29/30 
*/ +- [ASPEED_CLK_GATE_LCLK] = { 32, 32, "lclk-gate", NULL, 0 }, /* LPC */ +- [ASPEED_CLK_GATE_ESPICLK] = { 33, -1, "espiclk-gate", NULL, 0 }, /* eSPI */ ++ [ASPEED_CLK_GATE_LCLK] = { 32, 32, "lclk-gate", NULL, CLK_IS_CRITICAL }, /* LPC */ ++ [ASPEED_CLK_GATE_ESPICLK] = { 33, -1, "espiclk-gate", NULL, CLK_IS_CRITICAL }, /* eSPI */ + [ASPEED_CLK_GATE_REF1CLK] = { 34, -1, "ref1clk-gate", "clkin", CLK_IS_CRITICAL }, + /* Reserved 35 */ + [ASPEED_CLK_GATE_SDCLK] = { 36, 56, "sdclk-gate", NULL, 0 }, /* SDIO/SD */ +@@ -143,20 +198,20 @@ + [ASPEED_CLK_GATE_I3C4CLK] = { 44, 44, "i3c4clk-gate", "i3cclk", 0 }, /* I3C4 */ + [ASPEED_CLK_GATE_I3C5CLK] = { 45, 45, "i3c5clk-gate", "i3cclk", 0 }, /* I3C5 */ + /* Reserved: 46 & 47 */ +- [ASPEED_CLK_GATE_UART1CLK] = { 48, -1, "uart1clk-gate", "uart", 0 }, /* UART1 */ +- [ASPEED_CLK_GATE_UART2CLK] = { 49, -1, "uart2clk-gate", "uart", 0 }, /* UART2 */ +- [ASPEED_CLK_GATE_UART3CLK] = { 50, -1, "uart3clk-gate", "uart", 0 }, /* UART3 */ +- [ASPEED_CLK_GATE_UART4CLK] = { 51, -1, "uart4clk-gate", "uart", 0 }, /* UART4 */ ++ [ASPEED_CLK_GATE_UART1CLK] = { 48, -1, "uart1clk-gate", "uxclk", CLK_IS_CRITICAL }, /* UART1 */ ++ [ASPEED_CLK_GATE_UART2CLK] = { 49, -1, "uart2clk-gate", "uxclk", CLK_IS_CRITICAL }, /* UART2 */ ++ [ASPEED_CLK_GATE_UART3CLK] = { 50, -1, "uart3clk-gate", "uxclk", 0 }, /* UART3 */ ++ [ASPEED_CLK_GATE_UART4CLK] = { 51, -1, "uart4clk-gate", "uxclk", 0 }, /* UART4 */ + [ASPEED_CLK_GATE_MAC3CLK] = { 52, 52, "mac3clk-gate", "mac34", 0 }, /* MAC3 */ + [ASPEED_CLK_GATE_MAC4CLK] = { 53, 53, "mac4clk-gate", "mac34", 0 }, /* MAC4 */ +- [ASPEED_CLK_GATE_UART6CLK] = { 54, -1, "uart6clk-gate", "uartx", 0 }, /* UART6 */ +- [ASPEED_CLK_GATE_UART7CLK] = { 55, -1, "uart7clk-gate", "uartx", 0 }, /* UART7 */ +- [ASPEED_CLK_GATE_UART8CLK] = { 56, -1, "uart8clk-gate", "uartx", 0 }, /* UART8 */ +- [ASPEED_CLK_GATE_UART9CLK] = { 57, -1, "uart9clk-gate", "uartx", 0 }, /* UART9 */ +- [ASPEED_CLK_GATE_UART10CLK] = { 58, -1, "uart10clk-gate", "uartx", 0 }, /* UART10 */ +- [ASPEED_CLK_GATE_UART11CLK] = { 59, -1, "uart11clk-gate", "uartx", 0 }, /* UART11 */ +- [ASPEED_CLK_GATE_UART12CLK] = { 60, -1, "uart12clk-gate", "uartx", 0 }, /* UART12 */ +- [ASPEED_CLK_GATE_UART13CLK] = { 61, -1, "uart13clk-gate", "uartx", 0 }, /* UART13 */ ++ [ASPEED_CLK_GATE_UART6CLK] = { 54, -1, "uart6clk-gate", "uxclk", 0 }, /* UART6 */ ++ [ASPEED_CLK_GATE_UART7CLK] = { 55, -1, "uart7clk-gate", "uxclk", 0 }, /* UART7 */ ++ [ASPEED_CLK_GATE_UART8CLK] = { 56, -1, "uart8clk-gate", "uxclk", 0 }, /* UART8 */ ++ [ASPEED_CLK_GATE_UART9CLK] = { 57, -1, "uart9clk-gate", "uxclk", 0 }, /* UART9 */ ++ [ASPEED_CLK_GATE_UART10CLK] = { 58, -1, "uart10clk-gate", "uxclk", 0 }, /* UART10 */ ++ [ASPEED_CLK_GATE_UART11CLK] = { 59, -1, "uart11clk-gate", "uxclk", CLK_IS_CRITICAL }, /* UART11 */ ++ [ASPEED_CLK_GATE_UART12CLK] = { 60, -1, "uart12clk-gate", "uxclk", 0 }, /* UART12 */ ++ [ASPEED_CLK_GATE_UART13CLK] = { 61, -1, "uart13clk-gate", "uxclk", 0 }, /* UART13 */ + [ASPEED_CLK_GATE_FSICLK] = { 62, 59, "fsiclk-gate", "fsiclk", 0 }, /* FSI */ + }; + +@@ -184,6 +239,18 @@ + { 0 } + }; + ++static const struct clk_div_table ast2600_sd_div_table[] = { ++ { 0x0, 2 }, ++ { 0x1, 4 }, ++ { 0x2, 6 }, ++ { 0x3, 8 }, ++ { 0x4, 10 }, ++ { 0x5, 12 }, ++ { 0x6, 14 }, ++ { 0x7, 1 }, ++ { 0 } ++}; ++ + static const struct clk_div_table ast2600_mac_div_table[] = { + { 0x0, 4 }, + { 0x1, 4 }, +@@ -384,9 +451,14 @@ + struct aspeed_reset *ar = to_aspeed_reset(rcdev); + u32 rst = get_bit(id); + u32 reg = id >= 32 ? 
ASPEED_G6_RESET_CTRL2 : ASPEED_G6_RESET_CTRL; ++ u32 val; ++ int ret; + + /* Use set to clear register */ +- return regmap_write(ar->map, reg + 0x04, rst); ++ ret = regmap_write(ar->map, reg + 0x04, rst); ++ /* Add dummy read to ensure the write transfer is finished */ ++ regmap_read(ar->map, reg + 4, &val); ++ return ret; + } + + static int aspeed_g6_reset_assert(struct reset_controller_dev *rcdev, +@@ -458,11 +530,6 @@ + return hw; + } + +-static const char *const emmc_extclk_parent_names[] = { +- "emmc_extclk_hpll_in", +- "mpll", +-}; +- + static const char * const vclk_parent_names[] = { + "dpll", + "d1pll", +@@ -484,7 +551,7 @@ + struct aspeed_reset *ar; + struct regmap *map; + struct clk_hw *hw; +- u32 val, rate; ++ u32 val; + int i, ret; + + map = syscon_node_to_regmap(dev->of_node); +@@ -510,70 +577,50 @@ + return ret; + } + +- /* UART clock div13 setting */ +- regmap_read(map, ASPEED_G6_MISC_CTRL, &val); +- if (val & UART_DIV13_EN) +- rate = 24000000 / 13; +- else +- rate = 24000000; +- hw = clk_hw_register_fixed_rate(dev, "uart", NULL, 0, rate); +- if (IS_ERR(hw)) +- return PTR_ERR(hw); +- aspeed_g6_clk_data->hws[ASPEED_CLK_UART] = hw; +- +- /* UART6~13 clock div13 setting */ +- regmap_read(map, 0x80, &val); +- if (val & BIT(31)) +- rate = 24000000 / 13; +- else +- rate = 24000000; +- hw = clk_hw_register_fixed_rate(dev, "uartx", NULL, 0, rate); +- if (IS_ERR(hw)) +- return PTR_ERR(hw); +- aspeed_g6_clk_data->hws[ASPEED_CLK_UARTX] = hw; +- +- /* EMMC ext clock */ +- hw = clk_hw_register_fixed_factor(dev, "emmc_extclk_hpll_in", "hpll", +- 0, 1, 2); +- if (IS_ERR(hw)) +- return PTR_ERR(hw); +- +- hw = clk_hw_register_mux(dev, "emmc_extclk_mux", +- emmc_extclk_parent_names, +- ARRAY_SIZE(emmc_extclk_parent_names), 0, +- scu_g6_base + ASPEED_G6_CLK_SELECTION1, 11, 1, +- 0, &aspeed_g6_clk_lock); +- if (IS_ERR(hw)) +- return PTR_ERR(hw); ++ regmap_update_bits(map, ASPEED_G6_CLK_SELECTION1, GENMASK(14, 11), BIT(11)); + +- hw = clk_hw_register_gate(dev, "emmc_extclk_gate", "emmc_extclk_mux", +- 0, scu_g6_base + ASPEED_G6_CLK_SELECTION1, +- 15, 0, &aspeed_g6_clk_lock); ++ /* EMMC ext clock divider */ ++ hw = clk_hw_register_gate(dev, "emmc_extclk_gate", "mpll", 0, ++ scu_g6_base + ASPEED_G6_CLK_SELECTION1, 15, 0, ++ &aspeed_g6_clk_lock); + if (IS_ERR(hw)) + return PTR_ERR(hw); + +- hw = clk_hw_register_divider_table(dev, "emmc_extclk", +- "emmc_extclk_gate", 0, +- scu_g6_base + +- ASPEED_G6_CLK_SELECTION1, 12, +- 3, 0, ast2600_emmc_extclk_div_table, ++ //ast2600 emmc clk should under 200Mhz ++ hw = clk_hw_register_divider_table(dev, "emmc_extclk", "emmc_extclk_gate", 0, ++ scu_g6_base + ASPEED_G6_CLK_SELECTION1, 12, 3, 0, ++ ast2600_emmc_extclk_div_table, + &aspeed_g6_clk_lock); + if (IS_ERR(hw)) + return PTR_ERR(hw); + aspeed_g6_clk_data->hws[ASPEED_CLK_EMMC] = hw; + +- /* SD/SDIO clock divider and gate */ +- hw = clk_hw_register_gate(dev, "sd_extclk_gate", "hpll", 0, +- scu_g6_base + ASPEED_G6_CLK_SELECTION4, 31, 0, +- &aspeed_g6_clk_lock); +- if (IS_ERR(hw)) +- return PTR_ERR(hw); ++ clk_hw_register_fixed_rate(NULL, "hclk", NULL, 0, 200000000); ++ ++ regmap_read(map, 0x310, &val); ++ if (val & BIT(8)) { ++ /* SD/SDIO clock divider and gate */ ++ hw = clk_hw_register_gate(dev, "sd_extclk_gate", "apll", 0, ++ scu_g6_base + ASPEED_G6_CLK_SELECTION4, 31, 0, ++ &aspeed_g6_clk_lock); ++ if (IS_ERR(hw)) ++ return PTR_ERR(hw); ++ } else { ++ /* SD/SDIO clock divider and gate */ ++ hw = clk_hw_register_gate(dev, "sd_extclk_gate", "hclk", 0, ++ scu_g6_base + ASPEED_G6_CLK_SELECTION4, 31, 0, ++ 
&aspeed_g6_clk_lock); ++ if (IS_ERR(hw)) ++ return PTR_ERR(hw); ++ } ++ + hw = clk_hw_register_divider_table(dev, "sd_extclk", "sd_extclk_gate", +- 0, scu_g6_base + ASPEED_G6_CLK_SELECTION4, 28, 3, 0, +- ast2600_div_table, +- &aspeed_g6_clk_lock); ++ 0, scu_g6_base + ASPEED_G6_CLK_SELECTION4, 28, 3, 0, ++ ast2600_sd_div_table, ++ &aspeed_g6_clk_lock); + if (IS_ERR(hw)) + return PTR_ERR(hw); ++ + aspeed_g6_clk_data->hws[ASPEED_CLK_SDIO] = hw; + + /* MAC1/2 RMII 50MHz RCLK */ +@@ -645,8 +692,8 @@ + return PTR_ERR(hw); + aspeed_g6_clk_data->hws[ASPEED_CLK_LHCLK] = hw; + +- /* gfx d1clk : use dp clk */ +- regmap_update_bits(map, ASPEED_G6_CLK_SELECTION1, GENMASK(10, 8), BIT(10)); ++ /* gfx d1clk : use usb phy */ ++ regmap_update_bits(map, ASPEED_G6_CLK_SELECTION1, GENMASK(10, 8), BIT(9)); + /* SoC Display clock selection */ + hw = clk_hw_register_mux(dev, "d1clk", d1clk_parent_names, + ARRAY_SIZE(d1clk_parent_names), 0, +@@ -677,6 +724,8 @@ + return PTR_ERR(hw); + aspeed_g6_clk_data->hws[ASPEED_CLK_VCLK] = hw; + ++ //vclk : force disable dynmamic slow down and fix vclk = eclk / 2 ++ regmap_update_bits(map, ASPEED_G6_CLK_SELECTION1, GENMASK(31, 28), 0); + /* Video Engine clock divider */ + hw = clk_hw_register_divider_table(dev, "eclk", NULL, 0, + scu_g6_base + ASPEED_G6_CLK_SELECTION1, 28, 3, 0, +@@ -686,6 +735,26 @@ + return PTR_ERR(hw); + aspeed_g6_clk_data->hws[ASPEED_CLK_ECLK] = hw; + ++ /* uartx parent assign*/ ++ for (i = 0; i < 13; i++) { ++ if (i < 6 && i != 4) { ++ regmap_read(map, 0x310, &val); ++ if (val & BIT(i)) ++ aspeed_g6_gates[ASPEED_CLK_GATE_UART1CLK + i].parent_name = "huxclk"; ++ else ++ aspeed_g6_gates[ASPEED_CLK_GATE_UART1CLK + i].parent_name = "uxclk"; ++ } ++ if (i == 4) ++ aspeed_g6_gates[ASPEED_CLK_GATE_UART1CLK + i].parent_name = "uart5"; ++ if (i > 5 && i != 4) { ++ regmap_read(map, 0x314, &val); ++ if (val & BIT(i)) ++ aspeed_g6_gates[ASPEED_CLK_GATE_UART1CLK + i].parent_name = "huxclk"; ++ else ++ aspeed_g6_gates[ASPEED_CLK_GATE_UART1CLK + i].parent_name = "uxclk"; ++ } ++ } ++ + for (i = 0; i < ARRAY_SIZE(aspeed_g6_gates); i++) { + const struct aspeed_gate_data *gd = &aspeed_g6_gates[i]; + u32 gate_flags; +@@ -749,7 +818,8 @@ + static void __init aspeed_g6_cc(struct regmap *map) + { + struct clk_hw *hw; +- u32 val, div, divbits, axi_div, ahb_div; ++ u32 val, freq, div, divbits, axi_div, ahb_div; ++ u32 mult; + + clk_hw_register_fixed_rate(NULL, "clkin", NULL, 0, 25000000); + +@@ -814,6 +884,55 @@ + hw = clk_hw_register_fixed_rate(NULL, "usb-phy-40m", NULL, 0, 40000000); + aspeed_g6_clk_data->hws[ASPEED_CLK_USBPHY_40M] = hw; + ++ /* uart5 clock selection */ ++ regmap_read(map, ASPEED_G6_MISC_CTRL, &val); ++ if (val & UART_DIV13_EN) ++ div = 13; ++ else ++ div = 1; ++ regmap_read(map, ASPEED_G6_CLK_SELECTION2, &val); ++ if (val & BIT(14)) ++ freq = 192000000; ++ else ++ freq = 24000000; ++ freq = freq / div; ++ ++ aspeed_g6_clk_data->hws[ASPEED_CLK_UART5] = clk_hw_register_fixed_rate(NULL, "uart5", NULL, 0, freq); ++ ++ /* UART1~13 clock div13 setting except uart5 */ ++ regmap_read(map, ASPEED_G6_CLK_SELECTION5, &val); ++ ++ switch (val & 0x3) { ++ case 0: ++ aspeed_g6_clk_data->hws[ASPEED_CLK_UARTX] = clk_hw_register_fixed_factor(NULL, "uartx", "apll", 0, 1, 4); ++ break; ++ case 1: ++ aspeed_g6_clk_data->hws[ASPEED_CLK_UARTX] = clk_hw_register_fixed_factor(NULL, "uartx", "apll", 0, 1, 2); ++ break; ++ case 2: ++ aspeed_g6_clk_data->hws[ASPEED_CLK_UARTX] = clk_hw_register_fixed_factor(NULL, "uartx", "apll", 0, 1, 1); ++ break; ++ case 3: ++ 
aspeed_g6_clk_data->hws[ASPEED_CLK_UARTX] = clk_hw_register_fixed_factor(NULL, "uartx", "ahb", 0, 1, 1); ++ break; ++ } ++ ++ /* uxclk */ ++ regmap_read(map, ASPEED_UARTCLK_FROM_UXCLK, &val); ++ div = ((val >> 8) & 0x3ff) * 2; ++ mult = val & 0xff; ++ ++ hw = clk_hw_register_fixed_factor(NULL, "uxclk", "uartx", 0, mult, div); ++ aspeed_g6_clk_data->hws[ASPEED_CLK_UXCLK] = hw; ++ ++ /* huxclk */ ++ regmap_read(map, 0x33c, &val); ++ div = ((val >> 8) & 0x3ff) * 2; ++ mult = val & 0xff; ++ ++ hw = clk_hw_register_fixed_factor(NULL, "huxclk", "uartx", 0, mult, div); ++ aspeed_g6_clk_data->hws[ASPEED_CLK_HUXCLK] = hw; ++ + /* i3c clock: source from apll, divide by 8 */ + regmap_update_bits(map, ASPEED_G6_CLK_SELECTION5, + I3C_CLK_SELECTION | APLL_DIV_SELECTION, +@@ -829,6 +948,10 @@ + static void __init aspeed_g6_cc_init(struct device_node *np) + { + struct regmap *map; ++ struct mac_delay_config mac_cfg; ++ union mac_delay_1g reg_1g; ++ union mac_delay_100_10 reg_100, reg_10; ++ u32 uart_clk_source = 0; + int ret; + int i; + +@@ -863,6 +986,100 @@ + return; + } + ++ of_property_read_u32(np, "uart-clk-source", &uart_clk_source); ++ ++ if (uart_clk_source) { ++ if (uart_clk_source & GENMASK(5, 0)) ++ regmap_update_bits(map, ASPEED_G6_CLK_SELECTION4, GENMASK(5, 0), uart_clk_source & GENMASK(5, 0)); ++ ++ if (uart_clk_source & GENMASK(12, 6)) ++ regmap_update_bits(map, ASPEED_G6_CLK_SELECTION5, GENMASK(12, 6), uart_clk_source & GENMASK(12, 6)); ++ } ++ ++ /* fixed settings for RGMII/RMII clock generator */ ++ /* MAC1/2 RGMII 125MHz = EPLL / 8 */ ++ regmap_update_bits(map, ASPEED_G6_CLK_SELECTION2, GENMASK(23, 20), ++ (0x7 << 20)); ++ ++ /* MAC3/4 RMII 50MHz = HCLK / 4 */ ++ regmap_update_bits(map, ASPEED_G6_CLK_SELECTION4, GENMASK(18, 16), ++ (0x3 << 16)); ++ ++ /* BIT[31]: MAC1/2 RGMII 125M source = internal PLL ++ * BIT[28]: RGMIICK pad direction = output ++ */ ++ regmap_write(map, ASPEED_MAC12_CLK_DLY, ++ BIT(31) | BIT(28) | ASPEED_G6_DEF_MAC12_DELAY_1G); ++ regmap_write(map, ASPEED_MAC12_CLK_DLY_100M, ++ ASPEED_G6_DEF_MAC12_DELAY_100M); ++ regmap_write(map, ASPEED_MAC12_CLK_DLY_10M, ++ ASPEED_G6_DEF_MAC12_DELAY_10M); ++ ++ /* MAC3/4 RGMII 125M source = RGMIICK pad */ ++ regmap_write(map, ASPEED_MAC34_CLK_DLY, ++ ASPEED_G6_DEF_MAC34_DELAY_1G); ++ regmap_write(map, ASPEED_MAC34_CLK_DLY_100M, ++ ASPEED_G6_DEF_MAC34_DELAY_100M); ++ regmap_write(map, ASPEED_MAC34_CLK_DLY_10M, ++ ASPEED_G6_DEF_MAC34_DELAY_10M); ++ ++ /* MAC3/4 default pad driving strength */ ++ regmap_write(map, ASPEED_G6_MAC34_DRIVING_CTRL, 0x0000000f); ++ ++ regmap_read(map, ASPEED_MAC12_CLK_DLY, ®_1g.w); ++ regmap_read(map, ASPEED_MAC12_CLK_DLY_100M, ®_100.w); ++ regmap_read(map, ASPEED_MAC12_CLK_DLY_10M, ®_10.w); ++ ret = of_property_read_u32_array(np, "mac0-clk-delay", (u32 *)&mac_cfg, 6); ++ if (!ret) { ++ reg_1g.b.tx_delay_1 = mac_cfg.tx_delay_1000; ++ reg_1g.b.rx_delay_1 = mac_cfg.rx_delay_1000; ++ reg_100.b.tx_delay_1 = mac_cfg.tx_delay_100; ++ reg_100.b.rx_delay_1 = mac_cfg.rx_delay_100; ++ reg_10.b.tx_delay_1 = mac_cfg.tx_delay_10; ++ reg_10.b.rx_delay_1 = mac_cfg.rx_delay_10; ++ } ++ ret = of_property_read_u32_array(np, "mac1-clk-delay", (u32 *)&mac_cfg, 6); ++ if (!ret) { ++ reg_1g.b.tx_delay_2 = mac_cfg.tx_delay_1000; ++ reg_1g.b.rx_delay_2 = mac_cfg.rx_delay_1000; ++ reg_100.b.tx_delay_2 = mac_cfg.tx_delay_100; ++ reg_100.b.rx_delay_2 = mac_cfg.rx_delay_100; ++ reg_10.b.tx_delay_2 = mac_cfg.tx_delay_10; ++ reg_10.b.rx_delay_2 = mac_cfg.rx_delay_10; ++ } ++ regmap_write(map, ASPEED_MAC12_CLK_DLY, reg_1g.w); ++ 
regmap_write(map, ASPEED_MAC12_CLK_DLY_100M, reg_100.w); ++ regmap_write(map, ASPEED_MAC12_CLK_DLY_10M, reg_10.w); ++ ++ regmap_read(map, ASPEED_MAC34_CLK_DLY, ®_1g.w); ++ regmap_read(map, ASPEED_MAC34_CLK_DLY_100M, ®_100.w); ++ regmap_read(map, ASPEED_MAC34_CLK_DLY_10M, ®_10.w); ++ ret = of_property_read_u32_array(np, "mac2-clk-delay", (u32 *)&mac_cfg, 6); ++ if (!ret) { ++ reg_1g.b.tx_delay_1 = mac_cfg.tx_delay_1000; ++ reg_1g.b.rx_delay_1 = mac_cfg.rx_delay_1000; ++ reg_100.b.tx_delay_1 = mac_cfg.tx_delay_100; ++ reg_100.b.rx_delay_1 = mac_cfg.rx_delay_100; ++ reg_10.b.tx_delay_1 = mac_cfg.tx_delay_10; ++ reg_10.b.rx_delay_1 = mac_cfg.rx_delay_10; ++ } ++ ret = of_property_read_u32_array(np, "mac3-clk-delay", (u32 *)&mac_cfg, 6); ++ if (!ret) { ++ reg_1g.b.tx_delay_2 = mac_cfg.tx_delay_1000; ++ reg_1g.b.rx_delay_2 = mac_cfg.rx_delay_1000; ++ reg_100.b.tx_delay_2 = mac_cfg.tx_delay_100; ++ reg_100.b.rx_delay_2 = mac_cfg.rx_delay_100; ++ reg_10.b.tx_delay_2 = mac_cfg.tx_delay_10; ++ reg_10.b.rx_delay_2 = mac_cfg.rx_delay_10; ++ } ++ regmap_write(map, ASPEED_MAC34_CLK_DLY, reg_1g.w); ++ regmap_write(map, ASPEED_MAC34_CLK_DLY_100M, reg_100.w); ++ regmap_write(map, ASPEED_MAC34_CLK_DLY_10M, reg_10.w); ++ ++ /* A0/A1 need change to RSA clock = HPLL/3, A2/A3 have been set at Rom Code */ ++ regmap_update_bits(map, ASPEED_G6_CLK_SELECTION1, BIT(19), BIT(19)); ++ regmap_update_bits(map, ASPEED_G6_CLK_SELECTION1, GENMASK(27, 26), (2 << 26)); ++ + aspeed_g6_cc(map); + ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, aspeed_g6_clk_data); + if (ret) +diff --git a/drivers/clk/clk-ast2700.c b/drivers/clk/clk-ast2700.c +--- a/drivers/clk/clk-ast2700.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/clk/clk-ast2700.c 2025-12-23 10:16:20.644040714 +0000 +@@ -0,0 +1,1334 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright (c) 2024 ASPEED Technology Inc. 
++ * Author: Ryan Chen ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++ ++#define REVISION_ID GENMASK(23, 16) ++ ++/* SOC0 */ ++#define SCU0_HWSTRAP1 0x010 ++#define SCU0_CLK_STOP 0x240 ++#define SCU0_CLK_SEL1 0x280 ++#define SCU0_CLK_SEL2 0x284 ++#define GET_USB_REFCLK_DIV(x) ((GENMASK(23, 20) & (x)) >> 20) ++#define UART_DIV13_EN BIT(30) ++#define SCU0_HPLL_PARAM 0x300 ++#define SCU0_DPLL_PARAM 0x308 ++#define SCU0_MPLL_PARAM 0x310 ++#define SCU0_D0CLK_PARAM 0x320 ++#define SCU0_D1CLK_PARAM 0x330 ++#define SCU0_CRT0CLK_PARAM 0x340 ++#define SCU0_CRT1CLK_PARAM 0x350 ++#define SCU0_MPHYCLK_PARAM 0x360 ++ ++/* SOC1 */ ++#define SCU1_CLK_STOP 0x240 ++#define AST2755_SCU1_CLK_STOP 0x248 ++#define SCU1_CLK_STOP2 0x260 ++#define SCU1_CLK_SEL1 0x280 ++#define SCU1_CLK_SEL2 0x284 ++#define SCU1_CLK_I3C_DIV_MASK GENMASK(25, 23) ++#define SCU1_CLK_I3C_DIV(n) ((n) - 1) ++#define UXCLK_MASK GENMASK(1, 0) ++#define HUXCLK_MASK GENMASK(4, 3) ++#define SCU1_HPLL_PARAM 0x300 ++#define SCU1_APLL_PARAM 0x310 ++#define SCU1_DPLL_PARAM 0x320 ++#define SCU1_UXCLK_CTRL 0x330 ++#define SCU1_HUXCLK_CTRL 0x334 ++#define SCU1_MAC12_CLK_DLY 0x390 ++#define SCU1_MAC12_CLK_DLY_100M 0x394 ++#define SCU1_MAC12_CLK_DLY_10M 0x398 ++ ++/* ++ * MAC Clock Delay settings ++ */ ++#define MAC_CLK_RMII1_50M_RCLK_O_CTRL BIT(30) ++#define MAC_CLK_RMII1_50M_RCLK_O_DIS 0 ++#define MAC_CLK_RMII1_50M_RCLK_O_EN 1 ++#define MAC_CLK_RMII0_50M_RCLK_O_CTRL BIT(29) ++#define MAC_CLK_RMII0_5M_RCLK_O_DIS 0 ++#define MAC_CLK_RMII0_5M_RCLK_O_EN 1 ++#define MAC_CLK_RMII_TXD_FALLING_2 BIT(27) ++#define MAC_CLK_RMII_TXD_FALLING_1 BIT(26) ++#define MAC_CLK_RXCLK_INV_2 BIT(25) ++#define MAC_CLK_RXCLK_INV_1 BIT(24) ++#define MAC_CLK_1G_INPUT_DELAY_2 GENMASK(23, 18) ++#define MAC_CLK_1G_INPUT_DELAY_1 GENMASK(17, 12) ++#define MAC_CLK_1G_OUTPUT_DELAY_2 GENMASK(11, 6) ++#define MAC_CLK_1G_OUTPUT_DELAY_1 GENMASK(5, 0) ++ ++#define MAC_CLK_100M_10M_RESERVED GENMASK(31, 26) ++#define MAC_CLK_100M_10M_RXCLK_INV_2 BIT(25) ++#define MAC_CLK_100M_10M_RXCLK_INV_1 BIT(24) ++#define MAC_CLK_100M_10M_INPUT_DELAY_2 GENMASK(23, 18) ++#define MAC_CLK_100M_10M_INPUT_DELAY_1 GENMASK(17, 12) ++#define MAC_CLK_100M_10M_OUTPUT_DELAY_2 GENMASK(11, 6) ++#define MAC_CLK_100M_10M_OUTPUT_DELAY_1 GENMASK(5, 0) ++ ++#define AST2700_DEF_MAC12_DELAY_1G_A0 0x00CF4D75 ++#define AST2700_DEF_MAC12_DELAY_1G_A1 0x005D6618 ++#define AST2700_DEF_MAC12_DELAY_100M 0x00410410 ++#define AST2700_DEF_MAC12_DELAY_10M 0x00410410 ++ ++struct mac_delay_config { ++ u32 tx_delay_1000; ++ u32 rx_delay_1000; ++ u32 tx_delay_100; ++ u32 rx_delay_100; ++ u32 tx_delay_10; ++ u32 rx_delay_10; ++}; ++ ++enum ast2700_clk_type { ++ CLK_MUX, ++ CLK_PLL, ++ CLK_HPLL, ++ CLK_GATE, ++ CLK_MISC, ++ CLK_FIXED, ++ CLK_FIXED_DISPLAY, ++ CLK_DIVIDER, ++ CLK_UART_PLL, ++ CLK_FIXED_FACTOR, ++ CLK_GATE_ASPEED, ++}; ++ ++struct ast2700_clk_fixed_factor_data { ++ unsigned int mult; ++ unsigned int div; ++ int parent_id; ++}; ++ ++struct ast2700_clk_gate_data { ++ int parent_id; ++ u32 flags; ++ u32 reg; ++ u8 bit; ++}; ++ ++struct ast2700_clk_mux_data { ++ const struct clk_hw **parent_hws; ++ const unsigned int *parent_ids; ++ unsigned int num_parents; ++ u8 bit_shift; ++ u8 bit_width; ++ u32 reg; ++}; ++ ++struct ast2700_clk_div_data { ++ const struct clk_div_table *div_table; ++ unsigned int parent_id; ++ u8 bit_shift; ++ u8 bit_width; ++ u32 reg; ++}; ++ ++struct ast2700_clk_pll_data { ++ unsigned int parent_id; ++ u32 reg; ++}; ++ ++struct ast2700_clk_fixed_rate_data 
{ ++ unsigned long fixed_rate; ++}; ++ ++struct ast2700_clk_display_fixed_data { ++ u32 reg; ++}; ++ ++struct ast2700_clk_info { ++ u32 id; ++ const char *name; ++ u32 reg; ++ u32 type; ++ union { ++ struct ast2700_clk_fixed_factor_data factor; ++ struct ast2700_clk_fixed_rate_data rate; ++ struct ast2700_clk_display_fixed_data display_rate; ++ struct ast2700_clk_gate_data gate; ++ struct ast2700_clk_div_data div; ++ struct ast2700_clk_pll_data pll; ++ struct ast2700_clk_mux_data mux; ++ } data; ++}; ++ ++struct ast2700_clk_data { ++ const struct ast2700_clk_info *clk_info; ++ unsigned int nr_clks; ++ const int scu; ++}; ++ ++struct ast2700_clk_ctrl { ++ const struct ast2700_clk_data *clk_data; ++ struct device *dev; ++ void __iomem *base; ++ spinlock_t lock; /* clk lock */ ++}; ++ ++static const struct clk_div_table ast2700_rgmii_div_table[] = { ++ { 0x0, 4 }, ++ { 0x1, 4 }, ++ { 0x2, 6 }, ++ { 0x3, 8 }, ++ { 0x4, 10 }, ++ { 0x5, 12 }, ++ { 0x6, 14 }, ++ { 0x7, 16 }, ++ { 0 } ++}; ++ ++static const struct clk_div_table ast2700_rmii_div_table[] = { ++ { 0x0, 8 }, ++ { 0x1, 8 }, ++ { 0x2, 12 }, ++ { 0x3, 16 }, ++ { 0x4, 20 }, ++ { 0x5, 24 }, ++ { 0x6, 28 }, ++ { 0x7, 32 }, ++ { 0 } ++}; ++ ++static const struct clk_div_table ast2700_clk_div_table[] = { ++ { 0x0, 2 }, ++ { 0x1, 2 }, ++ { 0x2, 3 }, ++ { 0x3, 4 }, ++ { 0x4, 5 }, ++ { 0x5, 6 }, ++ { 0x6, 7 }, ++ { 0x7, 8 }, ++ { 0 } ++}; ++ ++static const struct clk_div_table ast2700_clk_div_table2[] = { ++ { 0x0, 2 }, ++ { 0x1, 4 }, ++ { 0x2, 6 }, ++ { 0x3, 8 }, ++ { 0x4, 10 }, ++ { 0x5, 12 }, ++ { 0x6, 14 }, ++ { 0x7, 16 }, ++ { 0 } ++}; ++ ++static const struct clk_div_table ast2700_hclk_div_table[] = { ++ { 0x0, 6 }, ++ { 0x1, 5 }, ++ { 0x2, 4 }, ++ { 0x3, 7 }, ++ { 0 } ++}; ++ ++static const struct clk_div_table ast2700_clk_uart_div_table[] = { ++ { 0x0, 1 }, ++ { 0x1, 13 }, ++ { 0 } ++}; ++ ++/* soc 0 */ ++static const unsigned int psp_parent_ids[] = { ++ SCU0_CLK_MPLL, ++ SCU0_CLK_HPLL, ++ SCU0_CLK_HPLL, ++ SCU0_CLK_HPLL, ++ SCU0_CLK_MPLL_DIV2, ++ SCU0_CLK_HPLL_DIV2, ++ SCU0_CLK_HPLL, ++ SCU0_CLK_HPLL ++}; ++ ++static const struct clk_hw *psp_parent_hws[ARRAY_SIZE(psp_parent_ids)]; ++ ++static const unsigned int hclk_parent_ids[] = { ++ SCU0_CLK_HPLL, ++ SCU0_CLK_MPLL ++}; ++ ++static const struct clk_hw *hclk_parent_hws[ARRAY_SIZE(hclk_parent_ids)]; ++ ++static const unsigned int emmc_parent_ids[] = { ++ SCU0_CLK_MPLL_DIV4, ++ SCU0_CLK_HPLL_DIV4 ++}; ++ ++static const struct clk_hw *emmc_parent_hws[ARRAY_SIZE(emmc_parent_ids)]; ++ ++static const unsigned int mphy_parent_ids[] = { ++ SCU0_CLK_MPLL, ++ SCU0_CLK_HPLL, ++ SCU0_CLK_DPLL, ++ SCU0_CLK_192M ++}; ++ ++static const struct clk_hw *mphy_parent_hws[ARRAY_SIZE(mphy_parent_ids)]; ++ ++static const unsigned int u2phy_parent_ids[] = { ++ SCU0_CLK_MPLL, ++ SCU0_CLK_HPLL ++}; ++ ++static const struct clk_hw *u2phy_parent_hws[ARRAY_SIZE(u2phy_parent_ids)]; ++ ++static const unsigned int uart_parent_ids[] = { ++ SCU0_CLK_24M, ++ SCU0_CLK_192M ++}; ++ ++static const struct clk_hw *uart_parent_hws[ARRAY_SIZE(uart_parent_ids)]; ++ ++/* soc 1 */ ++static const unsigned int uartx_parent_ids[] = { ++ SCU1_CLK_UARTX, ++ SCU1_CLK_HUARTX ++}; ++ ++static const struct clk_hw *uartx_parent_hws[ARRAY_SIZE(uartx_parent_ids)]; ++ ++static const unsigned int uxclk_parent_ids[] = { ++ SCU1_CLK_APLL_DIV4, ++ SCU1_CLK_APLL_DIV2, ++ SCU1_CLK_APLL, ++ SCU1_CLK_HPLL ++}; ++ ++static const struct clk_hw *uxclk_parent_hws[ARRAY_SIZE(uxclk_parent_ids)]; ++ ++static const unsigned int sdclk_parent_ids[] = { ++ 
SCU1_CLK_HPLL, ++ SCU1_CLK_APLL ++}; ++ ++static const struct clk_hw *sdclk_parent_hws[ARRAY_SIZE(sdclk_parent_ids)]; ++ ++#define FIXED_CLK(_id, _name, _rate) \ ++ { \ ++ .id = _id, \ ++ .type = CLK_FIXED, \ ++ .name = _name, \ ++ .data = { .rate = { .fixed_rate = _rate, } }, \ ++ } ++ ++#define FIXED_DISPLAY_CLK(_id, _name, _reg) \ ++ { \ ++ .id = _id, \ ++ .type = CLK_FIXED_DISPLAY, \ ++ .name = _name, \ ++ .data = { .display_rate = { .reg = _reg } }, \ ++ } ++ ++#define PLL_CLK(_id, _type, _name, _parent_id, _reg) \ ++ { \ ++ .id = _id, \ ++ .type = _type, \ ++ .name = _name, \ ++ .data = { .pll = { \ ++ .parent_id = _parent_id, \ ++ .reg = _reg, \ ++ } }, \ ++ } ++ ++#define MUX_CLK(_id, _name, _parent_ids, _num_parents, _parent_hws, _reg, _shift, _width) \ ++ { \ ++ .id = _id, \ ++ .type = CLK_MUX, \ ++ .name = _name, \ ++ .data = { \ ++ .mux = { \ ++ .parent_ids = _parent_ids, \ ++ .parent_hws = _parent_hws, \ ++ .num_parents = _num_parents, \ ++ .reg = (_reg), \ ++ .bit_shift = _shift, \ ++ .bit_width = _width, \ ++ }, \ ++ }, \ ++ } ++ ++#define DIVIDER_CLK(_id, _name, _parent_id, _reg, _shift, _width, _div_table) \ ++ { \ ++ .id = _id, \ ++ .type = CLK_DIVIDER, \ ++ .name = _name, \ ++ .data = { \ ++ .div = { \ ++ .parent_id = _parent_id, \ ++ .reg = _reg, \ ++ .bit_shift = _shift, \ ++ .bit_width = _width, \ ++ .div_table = _div_table, \ ++ }, \ ++ }, \ ++ } ++ ++#define FIXED_FACTOR_CLK(_id, _name, _parent_id, _mult, _div) \ ++ { \ ++ .id = _id, \ ++ .type = CLK_FIXED_FACTOR, \ ++ .name = _name, \ ++ .data = { .factor = { .parent_id = _parent_id, .mult = _mult, .div = _div, } }, \ ++ } ++ ++#define GATE_CLK(_id, _type, _name, _parent_id, _reg, _bit, _flags) \ ++ { \ ++ .id = _id, \ ++ .type = _type, \ ++ .name = _name, \ ++ .data = { \ ++ .gate = { \ ++ .parent_id = _parent_id, \ ++ .reg = _reg, \ ++ .bit = _bit, \ ++ .flags = _flags, \ ++ }, \ ++ }, \ ++ } ++ ++static const struct ast2700_clk_info ast2700_scu0_clk_info[] __initconst = { ++ FIXED_CLK(SCU0_CLKIN, "soc0-clkin", 25 * HZ_PER_MHZ), ++ FIXED_CLK(SCU0_CLK_24M, "soc0-clk24Mhz", 24 * HZ_PER_MHZ), ++ FIXED_CLK(SCU0_CLK_192M, "soc0-clk192Mhz", 192 * HZ_PER_MHZ), ++ FIXED_CLK(SCU0_CLK_U2PHY_CLK12M, "u2phy_clk12m", 12 * HZ_PER_MHZ), ++ FIXED_DISPLAY_CLK(SCU0_CLK_D0, "d0clk", SCU0_D0CLK_PARAM), ++ FIXED_DISPLAY_CLK(SCU0_CLK_D1, "d1clk", SCU0_D1CLK_PARAM), ++ FIXED_DISPLAY_CLK(SCU0_CLK_CRT0, "crt0clk", SCU0_CRT0CLK_PARAM), ++ FIXED_DISPLAY_CLK(SCU0_CLK_CRT1, "crt1clk", SCU0_CRT1CLK_PARAM), ++ PLL_CLK(SCU0_CLK_HPLL, CLK_HPLL, "soc0-hpll", SCU0_CLKIN, SCU0_HPLL_PARAM), ++ PLL_CLK(SCU0_CLK_DPLL, CLK_PLL, "soc0-dpll", SCU0_CLKIN, SCU0_DPLL_PARAM), ++ PLL_CLK(SCU0_CLK_MPLL, CLK_PLL, "soc0-mpll", SCU0_CLKIN, SCU0_MPLL_PARAM), ++ FIXED_FACTOR_CLK(SCU0_CLK_HPLL_DIV2, "soc0-hpll_div2", SCU0_CLK_HPLL, 1, 2), ++ FIXED_FACTOR_CLK(SCU0_CLK_HPLL_DIV4, "soc0-hpll_div4", SCU0_CLK_HPLL, 1, 4), ++ FIXED_FACTOR_CLK(SCU0_CLK_MPLL_DIV2, "soc0-mpll_div2", SCU0_CLK_MPLL, 1, 2), ++ FIXED_FACTOR_CLK(SCU0_CLK_MPLL_DIV4, "soc0-mpll_div4", SCU0_CLK_MPLL, 1, 4), ++ FIXED_FACTOR_CLK(SCU0_CLK_MPLL_DIV8, "soc0-mpll_div8", SCU0_CLK_MPLL, 1, 8), ++ FIXED_FACTOR_CLK(SCU0_CLK_AXI1, "axi1clk", SCU0_CLK_MPLL, 1, 4), ++ MUX_CLK(SCU0_CLK_PSP, "pspclk", psp_parent_ids, ARRAY_SIZE(psp_parent_ids), ++ psp_parent_hws, SCU0_HWSTRAP1, 2, 3), ++ FIXED_FACTOR_CLK(SCU0_CLK_AXI0, "axi0clk", SCU0_CLK_PSP, 1, 2), ++ MUX_CLK(SCU0_CLK_AHBMUX, "soc0-ahbmux", hclk_parent_ids, ARRAY_SIZE(hclk_parent_ids), ++ hclk_parent_hws, SCU0_HWSTRAP1, 7, 1), ++ MUX_CLK(SCU0_CLK_EMMCMUX, 
"emmcsrc-mux", emmc_parent_ids, ARRAY_SIZE(emmc_parent_ids), ++ emmc_parent_hws, SCU0_CLK_SEL1, 11, 1), ++ MUX_CLK(SCU0_CLK_MPHYSRC, "mphysrc", mphy_parent_ids, ARRAY_SIZE(mphy_parent_ids), ++ mphy_parent_hws, SCU0_CLK_SEL2, 18, 2), ++ MUX_CLK(SCU0_CLK_U2PHY_REFCLKSRC, "u2phy_refclksrc", u2phy_parent_ids, ++ ARRAY_SIZE(u2phy_parent_ids), u2phy_parent_hws, SCU0_CLK_SEL2, 23, 1), ++ MUX_CLK(SCU0_CLK_UART, "soc0-uartclk", uart_parent_ids, ARRAY_SIZE(uart_parent_ids), ++ uart_parent_hws, SCU0_CLK_SEL2, 14, 1), ++ PLL_CLK(SCU0_CLK_MPHY, CLK_MISC, "mphyclk", SCU0_CLK_MPHYSRC, SCU0_MPHYCLK_PARAM), ++ PLL_CLK(SCU0_CLK_U2PHY_REFCLK, CLK_MISC, "u2phy_refclk", SCU0_CLK_U2PHY_REFCLKSRC, ++ SCU0_CLK_SEL2), ++ DIVIDER_CLK(SCU0_CLK_AHB, "soc0-ahb", SCU0_CLK_AHBMUX, ++ SCU0_HWSTRAP1, 5, 2, ast2700_hclk_div_table), ++ DIVIDER_CLK(SCU0_CLK_EMMC, "emmcclk", SCU0_CLK_EMMCMUX, ++ SCU0_CLK_SEL1, 12, 3, ast2700_clk_div_table2), ++ DIVIDER_CLK(SCU0_CLK_APB, "soc0-apb", SCU0_CLK_AXI0, ++ SCU0_CLK_SEL1, 23, 3, ast2700_clk_div_table2), ++ DIVIDER_CLK(SCU0_CLK_HPLL_DIV_AHB, "soc0-hpll-ahb", SCU0_CLK_HPLL, ++ SCU0_HWSTRAP1, 5, 2, ast2700_hclk_div_table), ++ DIVIDER_CLK(SCU0_CLK_MPLL_DIV_AHB, "soc0-mpll-ahb", SCU0_CLK_MPLL, ++ SCU0_HWSTRAP1, 5, 2, ast2700_hclk_div_table), ++ DIVIDER_CLK(SCU0_CLK_UART4, "uart4clk", SCU0_CLK_UART, ++ SCU0_CLK_SEL2, 30, 1, ast2700_clk_uart_div_table), ++ GATE_CLK(SCU0_CLK_GATE_MCLK, CLK_GATE_ASPEED, "mclk-gate", SCU0_CLK_MPLL, ++ SCU0_CLK_STOP, 0, CLK_IS_CRITICAL), ++ GATE_CLK(SCU0_CLK_GATE_ECLK, CLK_GATE_ASPEED, "eclk-gate", -1, SCU0_CLK_STOP, 1, 0), ++ GATE_CLK(SCU0_CLK_GATE_2DCLK, CLK_GATE_ASPEED, "gclk-gate", -1, SCU0_CLK_STOP, 2, 0), ++ GATE_CLK(SCU0_CLK_GATE_VCLK, CLK_GATE_ASPEED, "vclk-gate", -1, SCU0_CLK_STOP, 3, 0), ++ GATE_CLK(SCU0_CLK_GATE_BCLK, CLK_GATE_ASPEED, "bclk-gate", -1, ++ SCU0_CLK_STOP, 4, CLK_IS_CRITICAL), ++ GATE_CLK(SCU0_CLK_GATE_VGA0CLK, CLK_GATE_ASPEED, "vga0clk-gate", -1, ++ SCU0_CLK_STOP, 5, CLK_IS_CRITICAL), ++ GATE_CLK(SCU0_CLK_GATE_REFCLK, CLK_GATE_ASPEED, "soc0-refclk-gate", SCU0_CLKIN, ++ SCU0_CLK_STOP, 6, CLK_IS_CRITICAL), ++ GATE_CLK(SCU0_CLK_GATE_PORTBUSB2CLK, CLK_GATE_ASPEED, "portb-usb2clk-gate", -1, ++ SCU0_CLK_STOP, 7, 0), ++ GATE_CLK(SCU0_CLK_GATE_UHCICLK, CLK_GATE_ASPEED, "uhciclk-gate", -1, SCU0_CLK_STOP, 9, 0), ++ GATE_CLK(SCU0_CLK_GATE_VGA1CLK, CLK_GATE_ASPEED, "vga1clk-gate", -1, ++ SCU0_CLK_STOP, 10, CLK_IS_CRITICAL), ++ GATE_CLK(SCU0_CLK_GATE_DDRPHYCLK, CLK_GATE_ASPEED, "ddrphy-gate", -1, ++ SCU0_CLK_STOP, 11, CLK_IS_CRITICAL), ++ GATE_CLK(SCU0_CLK_GATE_E2M0CLK, CLK_GATE_ASPEED, "e2m0clk-gate", -1, ++ SCU0_CLK_STOP, 12, CLK_IS_CRITICAL), ++ GATE_CLK(SCU0_CLK_GATE_HACCLK, CLK_GATE_ASPEED, "hacclk-gate", -1, SCU0_CLK_STOP, 13, 0), ++ GATE_CLK(SCU0_CLK_GATE_PORTAUSB2CLK, CLK_GATE_ASPEED, "porta-usb2clk-gate", -1, ++ SCU0_CLK_STOP, 14, 0), ++ GATE_CLK(SCU0_CLK_GATE_UART4CLK, CLK_GATE_ASPEED, "uart4clk-gate", SCU0_CLK_UART4, ++ SCU0_CLK_STOP, 15, CLK_IS_CRITICAL), ++ GATE_CLK(SCU0_CLK_GATE_SLICLK, CLK_GATE_ASPEED, "soc0-sliclk-gate", -1, ++ SCU0_CLK_STOP, 16, CLK_IS_CRITICAL), ++ GATE_CLK(SCU0_CLK_GATE_DACCLK, CLK_GATE_ASPEED, "dacclk-gate", -1, ++ SCU0_CLK_STOP, 17, CLK_IS_CRITICAL), ++ GATE_CLK(SCU0_CLK_GATE_DP, CLK_GATE_ASPEED, "dpclk-gate", -1, ++ SCU0_CLK_STOP, 18, CLK_IS_CRITICAL), ++ GATE_CLK(SCU0_CLK_GATE_E2M1CLK, CLK_GATE_ASPEED, "e2m1clk-gate", -1, ++ SCU0_CLK_STOP, 19, CLK_IS_CRITICAL), ++ GATE_CLK(SCU0_CLK_GATE_CRT0CLK, CLK_GATE_ASPEED, "crt0clk-gate", -1, ++ SCU0_CLK_STOP, 20, 0), ++ GATE_CLK(SCU0_CLK_GATE_CRT1CLK, CLK_GATE_ASPEED, 
"crt1clk-gate", -1, ++ SCU0_CLK_STOP, 21, 0), ++ GATE_CLK(SCU0_CLK_GATE_ECDSACLK, CLK_GATE_ASPEED, "eccclk-gate", -1, ++ SCU0_CLK_STOP, 23, 0), ++ GATE_CLK(SCU0_CLK_GATE_RSACLK, CLK_GATE_ASPEED, "rsaclk-gate", -1, ++ SCU0_CLK_STOP, 24, 0), ++ GATE_CLK(SCU0_CLK_GATE_RVAS0CLK, CLK_GATE_ASPEED, "rvas0clk-gate", -1, ++ SCU0_CLK_STOP, 25, 0), ++ GATE_CLK(SCU0_CLK_GATE_UFSCLK, CLK_GATE_ASPEED, "ufsclk-gate", -1, ++ SCU0_CLK_STOP, 26, 0), ++ GATE_CLK(SCU0_CLK_GATE_EMMCCLK, CLK_GATE_ASPEED, "emmcclk-gate", SCU0_CLK_EMMC, ++ SCU0_CLK_STOP, 27, 0), ++ GATE_CLK(SCU0_CLK_GATE_RVAS1CLK, CLK_GATE_ASPEED, "rvas1clk-gate", -1, ++ SCU0_CLK_STOP, 28, 0), ++}; ++ ++static const struct ast2700_clk_info ast2700_scu1_clk_info[] __initconst = { ++ FIXED_CLK(SCU1_CLKIN, "soc1-clkin", 25 * HZ_PER_MHZ), ++ PLL_CLK(SCU1_CLK_HPLL, CLK_PLL, "soc1-hpll", SCU1_CLKIN, SCU1_HPLL_PARAM), ++ PLL_CLK(SCU1_CLK_APLL, CLK_PLL, "soc1-apll", SCU1_CLKIN, SCU1_APLL_PARAM), ++ PLL_CLK(SCU1_CLK_DPLL, CLK_PLL, "soc1-dpll", SCU1_CLKIN, SCU1_DPLL_PARAM), ++ FIXED_FACTOR_CLK(SCU1_CLK_APLL_DIV2, "soc1-apll_div2", SCU1_CLK_APLL, 1, 2), ++ FIXED_FACTOR_CLK(SCU1_CLK_APLL_DIV4, "soc1-apll_div4", SCU1_CLK_APLL, 1, 4), ++ FIXED_FACTOR_CLK(SCU1_CLK_CAN, "canclk", SCU1_CLK_APLL, 1, 10), ++ DIVIDER_CLK(SCU1_CLK_APB, "soc1-apb", SCU1_CLK_HPLL, ++ SCU1_CLK_SEL1, 18, 3, ast2700_clk_div_table2), ++ DIVIDER_CLK(SCU1_CLK_RMII, "rmii", SCU1_CLK_HPLL, ++ SCU1_CLK_SEL1, 21, 3, ast2700_rmii_div_table), ++ DIVIDER_CLK(SCU1_CLK_RGMII, "rgmii", SCU1_CLK_HPLL, ++ SCU1_CLK_SEL1, 25, 3, ast2700_rgmii_div_table), ++ DIVIDER_CLK(SCU1_CLK_MACHCLK, "machclk", SCU1_CLK_HPLL, ++ SCU1_CLK_SEL1, 29, 3, ast2700_clk_div_table), ++ DIVIDER_CLK(SCU1_CLK_APLL_DIVN, "soc1-apll_divn", ++ SCU1_CLK_APLL, SCU1_CLK_SEL2, 8, 3, ast2700_clk_div_table), ++ DIVIDER_CLK(SCU1_CLK_AHB, "soc1-ahb", SCU1_CLK_HPLL, ++ SCU1_CLK_SEL2, 20, 3, ast2700_clk_div_table), ++ DIVIDER_CLK(SCU1_CLK_I3C, "soc1-i3c", SCU1_CLK_HPLL, ++ SCU1_CLK_SEL2, 23, 3, ast2700_clk_div_table), ++ MUX_CLK(SCU1_CLK_SDMUX, "sdclk-mux", sdclk_parent_ids, ARRAY_SIZE(sdclk_parent_ids), ++ sdclk_parent_hws, SCU1_CLK_SEL1, 13, 1), ++ MUX_CLK(SCU1_CLK_UXCLK, "uxclk", uxclk_parent_ids, ARRAY_SIZE(uxclk_parent_ids), ++ uxclk_parent_hws, SCU1_CLK_SEL2, 0, 2), ++ MUX_CLK(SCU1_CLK_HUXCLK, "huxclk", uxclk_parent_ids, ARRAY_SIZE(uxclk_parent_ids), ++ uxclk_parent_hws, SCU1_CLK_SEL2, 3, 2), ++ DIVIDER_CLK(SCU1_CLK_SDCLK, "sdclk", SCU1_CLK_SDMUX, ++ SCU1_CLK_SEL1, 14, 3, ast2700_clk_div_table), ++ PLL_CLK(SCU1_CLK_UARTX, CLK_UART_PLL, "uartxclk", SCU1_CLK_UXCLK, SCU1_UXCLK_CTRL), ++ PLL_CLK(SCU1_CLK_HUARTX, CLK_UART_PLL, "huartxclk", SCU1_CLK_HUXCLK, SCU1_HUXCLK_CTRL), ++ MUX_CLK(SCU1_CLK_UART0, "uart0clk", uartx_parent_ids, ARRAY_SIZE(uartx_parent_ids), ++ uartx_parent_hws, SCU1_CLK_SEL1, 0, 1), ++ MUX_CLK(SCU1_CLK_UART1, "uart1clk", uartx_parent_ids, ARRAY_SIZE(uartx_parent_ids), ++ uartx_parent_hws, SCU1_CLK_SEL1, 1, 1), ++ MUX_CLK(SCU1_CLK_UART2, "uart2clk", uartx_parent_ids, ARRAY_SIZE(uartx_parent_ids), ++ uartx_parent_hws, SCU1_CLK_SEL1, 2, 1), ++ MUX_CLK(SCU1_CLK_UART3, "uart3clk", uartx_parent_ids, ARRAY_SIZE(uartx_parent_ids), ++ uartx_parent_hws, SCU1_CLK_SEL1, 3, 1), ++ MUX_CLK(SCU1_CLK_UART5, "uart5clk", uartx_parent_ids, ARRAY_SIZE(uartx_parent_ids), ++ uartx_parent_hws, SCU1_CLK_SEL1, 5, 1), ++ MUX_CLK(SCU1_CLK_UART6, "uart6clk", uartx_parent_ids, ARRAY_SIZE(uartx_parent_ids), ++ uartx_parent_hws, SCU1_CLK_SEL1, 6, 1), ++ MUX_CLK(SCU1_CLK_UART7, "uart7clk", uartx_parent_ids, ARRAY_SIZE(uartx_parent_ids), ++ 
uartx_parent_hws, SCU1_CLK_SEL1, 7, 1), ++ MUX_CLK(SCU1_CLK_UART8, "uart8clk", uartx_parent_ids, ARRAY_SIZE(uartx_parent_ids), ++ uartx_parent_hws, SCU1_CLK_SEL1, 8, 1), ++ MUX_CLK(SCU1_CLK_UART9, "uart9clk", uartx_parent_ids, ARRAY_SIZE(uartx_parent_ids), ++ uartx_parent_hws, SCU1_CLK_SEL1, 9, 1), ++ MUX_CLK(SCU1_CLK_UART10, "uart10clk", uartx_parent_ids, ARRAY_SIZE(uartx_parent_ids), ++ uartx_parent_hws, SCU1_CLK_SEL1, 10, 1), ++ MUX_CLK(SCU1_CLK_UART11, "uart11clk", uartx_parent_ids, ARRAY_SIZE(uartx_parent_ids), ++ uartx_parent_hws, SCU1_CLK_SEL1, 11, 1), ++ MUX_CLK(SCU1_CLK_UART12, "uart12clk", uartx_parent_ids, ARRAY_SIZE(uartx_parent_ids), ++ uartx_parent_hws, SCU1_CLK_SEL1, 12, 1), ++ FIXED_FACTOR_CLK(SCU1_CLK_UART13, "uart13clk", SCU1_CLK_HUARTX, 1, 1), ++ FIXED_FACTOR_CLK(SCU1_CLK_UART14, "uart14clk", SCU1_CLK_HUARTX, 1, 1), ++ GATE_CLK(SCU1_CLK_MAC0RCLK, CLK_GATE, "mac0rclk-gate", SCU1_CLK_RMII, ++ SCU1_MAC12_CLK_DLY, 29, 0), ++ GATE_CLK(SCU1_CLK_MAC1RCLK, CLK_GATE, "mac1rclk-gate", SCU1_CLK_RMII, ++ SCU1_MAC12_CLK_DLY, 30, 0), ++ GATE_CLK(SCU1_CLK_GATE_LCLK0, CLK_GATE_ASPEED, "lclk0-gate", -1, ++ SCU1_CLK_STOP, 0, CLK_IS_CRITICAL), ++ GATE_CLK(SCU1_CLK_GATE_LCLK1, CLK_GATE_ASPEED, "lclk1-gate", -1, ++ SCU1_CLK_STOP, 1, CLK_IS_CRITICAL), ++ GATE_CLK(SCU1_CLK_GATE_ESPI0CLK, CLK_GATE_ASPEED, "espi0clk-gate", -1, ++ SCU1_CLK_STOP, 2, CLK_IS_CRITICAL), ++ GATE_CLK(SCU1_CLK_GATE_ESPI1CLK, CLK_GATE_ASPEED, "espi1clk-gate", -1, ++ SCU1_CLK_STOP, 3, CLK_IS_CRITICAL), ++ GATE_CLK(SCU1_CLK_GATE_SDCLK, CLK_GATE_ASPEED, "sdclk-gate", SCU1_CLK_SDCLK, ++ SCU1_CLK_STOP, 4, CLK_IS_CRITICAL), ++ GATE_CLK(SCU1_CLK_GATE_IPEREFCLK, CLK_GATE_ASPEED, "soc1-iperefclk-gate", -1, ++ SCU1_CLK_STOP, 5, CLK_IS_CRITICAL), ++ GATE_CLK(SCU1_CLK_GATE_REFCLK, CLK_GATE_ASPEED, "soc1-refclk-gate", -1, ++ SCU1_CLK_STOP, 6, CLK_IS_CRITICAL), ++ GATE_CLK(SCU1_CLK_GATE_LPCHCLK, CLK_GATE_ASPEED, "lpchclk-gate", -1, ++ SCU1_CLK_STOP, 7, CLK_IS_CRITICAL), ++ GATE_CLK(SCU1_CLK_GATE_MAC0CLK, CLK_GATE_ASPEED, "mac0clk-gate", -1, ++ SCU1_CLK_STOP, 8, 0), ++ GATE_CLK(SCU1_CLK_GATE_MAC1CLK, CLK_GATE_ASPEED, "mac1clk-gate", -1, ++ SCU1_CLK_STOP, 9, 0), ++ GATE_CLK(SCU1_CLK_GATE_MAC2CLK, CLK_GATE_ASPEED, "mac2clk-gate", -1, ++ SCU1_CLK_STOP, 10, 0), ++ GATE_CLK(SCU1_CLK_GATE_UART0CLK, CLK_GATE_ASPEED, "uart0clk-gate", SCU1_CLK_UART0, ++ SCU1_CLK_STOP, 11, CLK_IS_CRITICAL), ++ GATE_CLK(SCU1_CLK_GATE_UART1CLK, CLK_GATE_ASPEED, "uart1clk-gate", SCU1_CLK_UART1, ++ SCU1_CLK_STOP, 12, CLK_IS_CRITICAL), ++ GATE_CLK(SCU1_CLK_GATE_UART2CLK, CLK_GATE_ASPEED, "uart2clk-gate", SCU1_CLK_UART2, ++ SCU1_CLK_STOP, 13, CLK_IS_CRITICAL), ++ GATE_CLK(SCU1_CLK_GATE_UART3CLK, CLK_GATE_ASPEED, "uart3clk-gate", SCU1_CLK_UART3, ++ SCU1_CLK_STOP, 14, CLK_IS_CRITICAL), ++ GATE_CLK(SCU1_CLK_GATE_I2CCLK, CLK_GATE_ASPEED, "i2cclk-gate", -1, SCU1_CLK_STOP, 15, 0), ++ GATE_CLK(SCU1_CLK_GATE_I3C0CLK, CLK_GATE_ASPEED, "i3c0clk-gate", SCU1_CLK_I3C, ++ SCU1_CLK_STOP, 16, 0), ++ GATE_CLK(SCU1_CLK_GATE_I3C1CLK, CLK_GATE_ASPEED, "i3c1clk-gate", SCU1_CLK_I3C, ++ SCU1_CLK_STOP, 17, 0), ++ GATE_CLK(SCU1_CLK_GATE_I3C2CLK, CLK_GATE_ASPEED, "i3c2clk-gate", SCU1_CLK_I3C, ++ SCU1_CLK_STOP, 18, 0), ++ GATE_CLK(SCU1_CLK_GATE_I3C3CLK, CLK_GATE_ASPEED, "i3c3clk-gate", SCU1_CLK_I3C, ++ SCU1_CLK_STOP, 19, 0), ++ GATE_CLK(SCU1_CLK_GATE_I3C4CLK, CLK_GATE_ASPEED, "i3c4clk-gate", SCU1_CLK_I3C, ++ SCU1_CLK_STOP, 20, 0), ++ GATE_CLK(SCU1_CLK_GATE_I3C5CLK, CLK_GATE_ASPEED, "i3c5clk-gate", SCU1_CLK_I3C, ++ SCU1_CLK_STOP, 21, 0), ++ GATE_CLK(SCU1_CLK_GATE_I3C6CLK, CLK_GATE_ASPEED, 
"i3c6clk-gate", SCU1_CLK_I3C, ++ SCU1_CLK_STOP, 22, 0), ++ GATE_CLK(SCU1_CLK_GATE_I3C7CLK, CLK_GATE_ASPEED, "i3c7clk-gate", SCU1_CLK_I3C, ++ SCU1_CLK_STOP, 23, 0), ++ GATE_CLK(SCU1_CLK_GATE_I3C8CLK, CLK_GATE_ASPEED, "i3c8clk-gate", SCU1_CLK_I3C, ++ SCU1_CLK_STOP, 24, 0), ++ GATE_CLK(SCU1_CLK_GATE_I3C9CLK, CLK_GATE_ASPEED, "i3c9clk-gate", SCU1_CLK_I3C, ++ SCU1_CLK_STOP, 25, 0), ++ GATE_CLK(SCU1_CLK_GATE_I3C10CLK, CLK_GATE_ASPEED, "i3c10clk-gate", SCU1_CLK_I3C, ++ SCU1_CLK_STOP, 26, 0), ++ GATE_CLK(SCU1_CLK_GATE_I3C11CLK, CLK_GATE_ASPEED, "i3c11clk-gate", SCU1_CLK_I3C, ++ SCU1_CLK_STOP, 27, 0), ++ GATE_CLK(SCU1_CLK_GATE_I3C12CLK, CLK_GATE_ASPEED, "i3c12clk-gate", SCU1_CLK_I3C, ++ SCU1_CLK_STOP, 28, 0), ++ GATE_CLK(SCU1_CLK_GATE_I3C13CLK, CLK_GATE_ASPEED, "i3c13clk-gate", SCU1_CLK_I3C, ++ SCU1_CLK_STOP, 29, 0), ++ GATE_CLK(SCU1_CLK_GATE_I3C14CLK, CLK_GATE_ASPEED, "i3c14clk-gate", SCU1_CLK_I3C, ++ SCU1_CLK_STOP, 30, 0), ++ GATE_CLK(SCU1_CLK_GATE_I3C15CLK, CLK_GATE_ASPEED, "i3c15clk-gate", SCU1_CLK_I3C, ++ SCU1_CLK_STOP, 31, 0), ++ GATE_CLK(SCU1_CLK_GATE_UART5CLK, CLK_GATE_ASPEED, "uart5clk-gate", SCU1_CLK_UART5, ++ SCU1_CLK_STOP2, 0, CLK_IS_CRITICAL), ++ GATE_CLK(SCU1_CLK_GATE_UART6CLK, CLK_GATE_ASPEED, "uart6clk-gate", SCU1_CLK_UART6, ++ SCU1_CLK_STOP2, 1, CLK_IS_CRITICAL), ++ GATE_CLK(SCU1_CLK_GATE_UART7CLK, CLK_GATE_ASPEED, "uart7clk-gate", SCU1_CLK_UART7, ++ SCU1_CLK_STOP2, 2, CLK_IS_CRITICAL), ++ GATE_CLK(SCU1_CLK_GATE_UART8CLK, CLK_GATE_ASPEED, "uart8clk-gate", SCU1_CLK_UART8, ++ SCU1_CLK_STOP2, 3, CLK_IS_CRITICAL), ++ GATE_CLK(SCU1_CLK_GATE_UART9CLK, CLK_GATE_ASPEED, "uart9clk-gate", SCU1_CLK_UART9, ++ SCU1_CLK_STOP2, 4, 0), ++ GATE_CLK(SCU1_CLK_GATE_UART10CLK, CLK_GATE_ASPEED, "uart10clk-gate", SCU1_CLK_UART10, ++ SCU1_CLK_STOP2, 5, 0), ++ GATE_CLK(SCU1_CLK_GATE_UART11CLK, CLK_GATE_ASPEED, "uart11clk-gate", SCU1_CLK_UART11, ++ SCU1_CLK_STOP2, 6, 0), ++ GATE_CLK(SCU1_CLK_GATE_UART12CLK, CLK_GATE_ASPEED, "uart12clk-gate", SCU1_CLK_UART12, ++ SCU1_CLK_STOP2, 7, 0), ++ GATE_CLK(SCU1_CLK_GATE_FSICLK, CLK_GATE_ASPEED, "fsiclk-gate", -1, SCU1_CLK_STOP2, 8, 0), ++ GATE_CLK(SCU1_CLK_GATE_LTPIPHYCLK, CLK_GATE_ASPEED, "ltpiphyclk-gate", -1, ++ SCU1_CLK_STOP2, 9, 0), ++ GATE_CLK(SCU1_CLK_GATE_LTPICLK, CLK_GATE_ASPEED, "ltpiclk-gate", -1, ++ SCU1_CLK_STOP2, 10, 0), ++ GATE_CLK(SCU1_CLK_GATE_VGALCLK, CLK_GATE_ASPEED, "vgalclk-gate", -1, ++ SCU1_CLK_STOP2, 11, CLK_IS_CRITICAL), ++ GATE_CLK(SCU1_CLK_GATE_UHCICLK, CLK_GATE_ASPEED, "usbuartclk-gate", -1, ++ SCU1_CLK_STOP2, 12, 0), ++ GATE_CLK(SCU1_CLK_GATE_CANCLK, CLK_GATE_ASPEED, "canclk-gate", SCU1_CLK_CAN, ++ SCU1_CLK_STOP2, 13, 0), ++ GATE_CLK(SCU1_CLK_GATE_PCICLK, CLK_GATE_ASPEED, "pciclk-gate", -1, ++ SCU1_CLK_STOP2, 14, 0), ++ GATE_CLK(SCU1_CLK_GATE_SLICLK, CLK_GATE_ASPEED, "soc1-sliclk-gate", -1, ++ SCU1_CLK_STOP2, 15, CLK_IS_CRITICAL), ++ GATE_CLK(SCU1_CLK_GATE_E2MCLK, CLK_GATE_ASPEED, "soc1-e2m-gate", -1, ++ SCU1_CLK_STOP2, 16, CLK_IS_CRITICAL), ++ GATE_CLK(SCU1_CLK_GATE_PORTCUSB2CLK, CLK_GATE_ASPEED, "portcusb2-gate", -1, ++ SCU1_CLK_STOP2, 17, 0), ++ GATE_CLK(SCU1_CLK_GATE_PORTDUSB2CLK, CLK_GATE_ASPEED, "portdusb2-gate", -1, ++ SCU1_CLK_STOP2, 18, 0), ++ GATE_CLK(SCU1_CLK_GATE_LTPI1TXCLK, CLK_GATE_ASPEED, "ltp1tx-gate", -1, ++ SCU1_CLK_STOP2, 19, 0), ++}; ++ ++static const struct ast2700_clk_info ast2755_scu1_clk_info[] __initconst = { ++ FIXED_CLK(SCU1_CLKIN, "soc1-clkin", 25 * HZ_PER_MHZ), ++ PLL_CLK(SCU1_CLK_HPLL, CLK_PLL, "soc1-hpll", SCU1_CLKIN, SCU1_HPLL_PARAM), ++ PLL_CLK(SCU1_CLK_APLL, CLK_PLL, "soc1-apll", SCU1_CLKIN, 
SCU1_APLL_PARAM), ++ PLL_CLK(SCU1_CLK_DPLL, CLK_PLL, "soc1-dpll", SCU1_CLKIN, SCU1_DPLL_PARAM), ++ FIXED_FACTOR_CLK(SCU1_CLK_APLL_DIV2, "soc1-apll_div2", SCU1_CLK_APLL, 1, 2), ++ FIXED_FACTOR_CLK(SCU1_CLK_APLL_DIV4, "soc1-apll_div4", SCU1_CLK_APLL, 1, 4), ++ FIXED_FACTOR_CLK(SCU1_CLK_CAN, "canclk", SCU1_CLK_APLL, 1, 10), ++ DIVIDER_CLK(SCU1_CLK_APB, "soc1-apb", SCU1_CLK_HPLL, ++ SCU1_CLK_SEL1, 18, 3, ast2700_clk_div_table2), ++ DIVIDER_CLK(SCU1_CLK_RMII, "rmii", SCU1_CLK_HPLL, ++ SCU1_CLK_SEL1, 21, 3, ast2700_rmii_div_table), ++ DIVIDER_CLK(SCU1_CLK_RGMII, "rgmii", SCU1_CLK_HPLL, ++ SCU1_CLK_SEL1, 25, 3, ast2700_rgmii_div_table), ++ DIVIDER_CLK(SCU1_CLK_MACHCLK, "machclk", SCU1_CLK_HPLL, ++ SCU1_CLK_SEL1, 29, 3, ast2700_clk_div_table), ++ DIVIDER_CLK(SCU1_CLK_APLL_DIVN, "soc1-apll_divn", ++ SCU1_CLK_APLL, SCU1_CLK_SEL2, 8, 3, ast2700_clk_div_table), ++ DIVIDER_CLK(SCU1_CLK_AHB, "soc1-ahb", SCU1_CLK_HPLL, ++ SCU1_CLK_SEL2, 20, 3, ast2700_clk_div_table), ++ DIVIDER_CLK(SCU1_CLK_I3C, "soc1-i3c", SCU1_CLK_HPLL, ++ SCU1_CLK_SEL2, 23, 3, ast2700_clk_div_table), ++ MUX_CLK(SCU1_CLK_SDMUX, "sdclk-mux", sdclk_parent_ids, ARRAY_SIZE(sdclk_parent_ids), ++ sdclk_parent_hws, SCU1_CLK_SEL1, 13, 1), ++ MUX_CLK(SCU1_CLK_UXCLK, "uxclk", uxclk_parent_ids, ARRAY_SIZE(uxclk_parent_ids), ++ uxclk_parent_hws, SCU1_CLK_SEL2, 0, 2), ++ MUX_CLK(SCU1_CLK_HUXCLK, "huxclk", uxclk_parent_ids, ARRAY_SIZE(uxclk_parent_ids), ++ uxclk_parent_hws, SCU1_CLK_SEL2, 3, 2), ++ DIVIDER_CLK(SCU1_CLK_SDCLK, "sdclk", SCU1_CLK_SDMUX, ++ SCU1_CLK_SEL1, 14, 3, ast2700_clk_div_table), ++ PLL_CLK(SCU1_CLK_UARTX, CLK_UART_PLL, "uartxclk", SCU1_CLK_UXCLK, SCU1_UXCLK_CTRL), ++ PLL_CLK(SCU1_CLK_HUARTX, CLK_UART_PLL, "huartxclk", SCU1_CLK_HUXCLK, SCU1_HUXCLK_CTRL), ++ MUX_CLK(SCU1_CLK_UART0, "uart0clk", uartx_parent_ids, ARRAY_SIZE(uartx_parent_ids), ++ uartx_parent_hws, SCU1_CLK_SEL1, 0, 1), ++ MUX_CLK(SCU1_CLK_UART1, "uart1clk", uartx_parent_ids, ARRAY_SIZE(uartx_parent_ids), ++ uartx_parent_hws, SCU1_CLK_SEL1, 1, 1), ++ MUX_CLK(SCU1_CLK_UART2, "uart2clk", uartx_parent_ids, ARRAY_SIZE(uartx_parent_ids), ++ uartx_parent_hws, SCU1_CLK_SEL1, 2, 1), ++ MUX_CLK(SCU1_CLK_UART3, "uart3clk", uartx_parent_ids, ARRAY_SIZE(uartx_parent_ids), ++ uartx_parent_hws, SCU1_CLK_SEL1, 3, 1), ++ MUX_CLK(SCU1_CLK_UART5, "uart5clk", uartx_parent_ids, ARRAY_SIZE(uartx_parent_ids), ++ uartx_parent_hws, SCU1_CLK_SEL1, 5, 1), ++ MUX_CLK(SCU1_CLK_UART6, "uart6clk", uartx_parent_ids, ARRAY_SIZE(uartx_parent_ids), ++ uartx_parent_hws, SCU1_CLK_SEL1, 6, 1), ++ MUX_CLK(SCU1_CLK_UART7, "uart7clk", uartx_parent_ids, ARRAY_SIZE(uartx_parent_ids), ++ uartx_parent_hws, SCU1_CLK_SEL1, 7, 1), ++ MUX_CLK(SCU1_CLK_UART8, "uart8clk", uartx_parent_ids, ARRAY_SIZE(uartx_parent_ids), ++ uartx_parent_hws, SCU1_CLK_SEL1, 8, 1), ++ MUX_CLK(SCU1_CLK_UART9, "uart9clk", uartx_parent_ids, ARRAY_SIZE(uartx_parent_ids), ++ uartx_parent_hws, SCU1_CLK_SEL1, 9, 1), ++ MUX_CLK(SCU1_CLK_UART10, "uart10clk", uartx_parent_ids, ARRAY_SIZE(uartx_parent_ids), ++ uartx_parent_hws, SCU1_CLK_SEL1, 10, 1), ++ MUX_CLK(SCU1_CLK_UART11, "uart11clk", uartx_parent_ids, ARRAY_SIZE(uartx_parent_ids), ++ uartx_parent_hws, SCU1_CLK_SEL1, 11, 1), ++ MUX_CLK(SCU1_CLK_UART12, "uart12clk", uartx_parent_ids, ARRAY_SIZE(uartx_parent_ids), ++ uartx_parent_hws, SCU1_CLK_SEL1, 12, 1), ++ FIXED_FACTOR_CLK(SCU1_CLK_UART13, "uart13clk", SCU1_CLK_HUARTX, 1, 1), ++ FIXED_FACTOR_CLK(SCU1_CLK_UART14, "uart14clk", SCU1_CLK_HUARTX, 1, 1), ++ GATE_CLK(SCU1_CLK_MAC0RCLK, CLK_GATE, "mac0rclk-gate", SCU1_CLK_RMII, ++ 
SCU1_MAC12_CLK_DLY, 29, 0), ++ GATE_CLK(SCU1_CLK_MAC1RCLK, CLK_GATE, "mac1rclk-gate", SCU1_CLK_RMII, ++ SCU1_MAC12_CLK_DLY, 30, 0), ++ GATE_CLK(SCU1_CLK_GATE_LCLK0, CLK_GATE_ASPEED, "lclk0-gate", -1, ++ SCU1_CLK_STOP, 0, CLK_IS_CRITICAL), ++ GATE_CLK(SCU1_CLK_GATE_LCLK1, CLK_GATE_ASPEED, "lclk1-gate", -1, ++ AST2755_SCU1_CLK_STOP, 1, CLK_IS_CRITICAL), ++ GATE_CLK(SCU1_CLK_GATE_ESPI0CLK, CLK_GATE_ASPEED, "espi0clk-gate", -1, ++ AST2755_SCU1_CLK_STOP, 2, CLK_IS_CRITICAL), ++ GATE_CLK(SCU1_CLK_GATE_ESPI1CLK, CLK_GATE_ASPEED, "espi1clk-gate", -1, ++ AST2755_SCU1_CLK_STOP, 3, CLK_IS_CRITICAL), ++ GATE_CLK(SCU1_CLK_GATE_SDCLK, CLK_GATE_ASPEED, "sdclk-gate", SCU1_CLK_SDCLK, ++ AST2755_SCU1_CLK_STOP, 4, CLK_IS_CRITICAL), ++ GATE_CLK(SCU1_CLK_GATE_IPEREFCLK, CLK_GATE_ASPEED, "soc1-iperefclk-gate", -1, ++ AST2755_SCU1_CLK_STOP, 5, CLK_IS_CRITICAL), ++ GATE_CLK(SCU1_CLK_GATE_REFCLK, CLK_GATE_ASPEED, "soc1-refclk-gate", -1, ++ AST2755_SCU1_CLK_STOP, 6, CLK_IS_CRITICAL), ++ GATE_CLK(SCU1_CLK_GATE_LPCHCLK, CLK_GATE_ASPEED, "lpchclk-gate", -1, ++ AST2755_SCU1_CLK_STOP, 7, CLK_IS_CRITICAL), ++ GATE_CLK(SCU1_CLK_GATE_MAC0CLK, CLK_GATE_ASPEED, "mac0clk-gate", -1, ++ AST2755_SCU1_CLK_STOP, 8, 0), ++ GATE_CLK(SCU1_CLK_GATE_MAC1CLK, CLK_GATE_ASPEED, "mac1clk-gate", -1, ++ AST2755_SCU1_CLK_STOP, 9, 0), ++ GATE_CLK(SCU1_CLK_GATE_MAC2CLK, CLK_GATE_ASPEED, "mac2clk-gate", -1, ++ AST2755_SCU1_CLK_STOP, 10, 0), ++ GATE_CLK(SCU1_CLK_GATE_UART0CLK, CLK_GATE_ASPEED, "uart0clk-gate", SCU1_CLK_UART0, ++ AST2755_SCU1_CLK_STOP, 11, CLK_IS_CRITICAL), ++ GATE_CLK(SCU1_CLK_GATE_UART1CLK, CLK_GATE_ASPEED, "uart1clk-gate", SCU1_CLK_UART1, ++ AST2755_SCU1_CLK_STOP, 12, CLK_IS_CRITICAL), ++ GATE_CLK(SCU1_CLK_GATE_UART2CLK, CLK_GATE_ASPEED, "uart2clk-gate", SCU1_CLK_UART2, ++ AST2755_SCU1_CLK_STOP, 13, CLK_IS_CRITICAL), ++ GATE_CLK(SCU1_CLK_GATE_UART3CLK, CLK_GATE_ASPEED, "uart3clk-gate", SCU1_CLK_UART3, ++ AST2755_SCU1_CLK_STOP, 14, CLK_IS_CRITICAL), ++ GATE_CLK(SCU1_CLK_GATE_I2CCLK, CLK_GATE_ASPEED, "i2cclk-gate", -1, SCU1_CLK_STOP, 15, 0), ++ GATE_CLK(SCU1_CLK_GATE_I3C0CLK, CLK_GATE_ASPEED, "i3c0clk-gate", SCU1_CLK_I3C, ++ AST2755_SCU1_CLK_STOP, 16, 0), ++ GATE_CLK(SCU1_CLK_GATE_I3C1CLK, CLK_GATE_ASPEED, "i3c1clk-gate", SCU1_CLK_I3C, ++ AST2755_SCU1_CLK_STOP, 17, 0), ++ GATE_CLK(SCU1_CLK_GATE_I3C2CLK, CLK_GATE_ASPEED, "i3c2clk-gate", SCU1_CLK_I3C, ++ AST2755_SCU1_CLK_STOP, 18, 0), ++ GATE_CLK(SCU1_CLK_GATE_I3C3CLK, CLK_GATE_ASPEED, "i3c3clk-gate", SCU1_CLK_I3C, ++ AST2755_SCU1_CLK_STOP, 19, 0), ++ GATE_CLK(SCU1_CLK_GATE_I3C4CLK, CLK_GATE_ASPEED, "i3c4clk-gate", SCU1_CLK_I3C, ++ AST2755_SCU1_CLK_STOP, 20, 0), ++ GATE_CLK(SCU1_CLK_GATE_I3C5CLK, CLK_GATE_ASPEED, "i3c5clk-gate", SCU1_CLK_I3C, ++ AST2755_SCU1_CLK_STOP, 21, 0), ++ GATE_CLK(SCU1_CLK_GATE_I3C6CLK, CLK_GATE_ASPEED, "i3c6clk-gate", SCU1_CLK_I3C, ++ AST2755_SCU1_CLK_STOP, 22, 0), ++ GATE_CLK(SCU1_CLK_GATE_I3C7CLK, CLK_GATE_ASPEED, "i3c7clk-gate", SCU1_CLK_I3C, ++ AST2755_SCU1_CLK_STOP, 23, 0), ++ GATE_CLK(SCU1_CLK_GATE_I3C8CLK, CLK_GATE_ASPEED, "i3c8clk-gate", SCU1_CLK_I3C, ++ AST2755_SCU1_CLK_STOP, 24, 0), ++ GATE_CLK(SCU1_CLK_GATE_I3C9CLK, CLK_GATE_ASPEED, "i3c9clk-gate", SCU1_CLK_I3C, ++ AST2755_SCU1_CLK_STOP, 25, 0), ++ GATE_CLK(SCU1_CLK_GATE_I3C10CLK, CLK_GATE_ASPEED, "i3c10clk-gate", SCU1_CLK_I3C, ++ AST2755_SCU1_CLK_STOP, 26, 0), ++ GATE_CLK(SCU1_CLK_GATE_I3C11CLK, CLK_GATE_ASPEED, "i3c11clk-gate", SCU1_CLK_I3C, ++ AST2755_SCU1_CLK_STOP, 27, 0), ++ GATE_CLK(SCU1_CLK_GATE_I3C12CLK, CLK_GATE_ASPEED, "i3c12clk-gate", SCU1_CLK_I3C, ++ AST2755_SCU1_CLK_STOP, 28, 0), ++ 
GATE_CLK(SCU1_CLK_GATE_I3C13CLK, CLK_GATE_ASPEED, "i3c13clk-gate", SCU1_CLK_I3C, ++ AST2755_SCU1_CLK_STOP, 29, 0), ++ GATE_CLK(SCU1_CLK_GATE_I3C14CLK, CLK_GATE_ASPEED, "i3c14clk-gate", SCU1_CLK_I3C, ++ AST2755_SCU1_CLK_STOP, 30, 0), ++ GATE_CLK(SCU1_CLK_GATE_I3C15CLK, CLK_GATE_ASPEED, "i3c15clk-gate", SCU1_CLK_I3C, ++ AST2755_SCU1_CLK_STOP, 31, 0), ++ GATE_CLK(SCU1_CLK_GATE_UART5CLK, CLK_GATE_ASPEED, "uart5clk-gate", SCU1_CLK_UART5, ++ AST2755_SCU1_CLK_STOP, 0, CLK_IS_CRITICAL), ++ GATE_CLK(SCU1_CLK_GATE_UART6CLK, CLK_GATE_ASPEED, "uart6clk-gate", SCU1_CLK_UART6, ++ AST2755_SCU1_CLK_STOP, 1, CLK_IS_CRITICAL), ++ GATE_CLK(SCU1_CLK_GATE_UART7CLK, CLK_GATE_ASPEED, "uart7clk-gate", SCU1_CLK_UART7, ++ AST2755_SCU1_CLK_STOP, 2, CLK_IS_CRITICAL), ++ GATE_CLK(SCU1_CLK_GATE_UART8CLK, CLK_GATE_ASPEED, "uart8clk-gate", SCU1_CLK_UART8, ++ AST2755_SCU1_CLK_STOP, 3, CLK_IS_CRITICAL), ++ GATE_CLK(SCU1_CLK_GATE_UART9CLK, CLK_GATE_ASPEED, "uart9clk-gate", SCU1_CLK_UART9, ++ AST2755_SCU1_CLK_STOP, 4, 0), ++ GATE_CLK(SCU1_CLK_GATE_UART10CLK, CLK_GATE_ASPEED, "uart10clk-gate", SCU1_CLK_UART10, ++ AST2755_SCU1_CLK_STOP, 5, 0), ++ GATE_CLK(SCU1_CLK_GATE_UART11CLK, CLK_GATE_ASPEED, "uart11clk-gate", SCU1_CLK_UART11, ++ AST2755_SCU1_CLK_STOP, 6, 0), ++ GATE_CLK(SCU1_CLK_GATE_UART12CLK, CLK_GATE_ASPEED, "uart12clk-gate", SCU1_CLK_UART12, ++ AST2755_SCU1_CLK_STOP, 7, 0), ++ GATE_CLK(SCU1_CLK_GATE_FSICLK, CLK_GATE_ASPEED, "fsiclk-gate", -1, SCU1_CLK_STOP2, 8, 0), ++ GATE_CLK(SCU1_CLK_GATE_LTPIPHYCLK, CLK_GATE_ASPEED, "ltpiphyclk-gate", -1, ++ AST2755_SCU1_CLK_STOP, 9, 0), ++ GATE_CLK(SCU1_CLK_GATE_LTPICLK, CLK_GATE_ASPEED, "ltpiclk-gate", -1, ++ AST2755_SCU1_CLK_STOP, 10, 0), ++ GATE_CLK(SCU1_CLK_GATE_VGALCLK, CLK_GATE_ASPEED, "vgalclk-gate", -1, ++ AST2755_SCU1_CLK_STOP, 11, CLK_IS_CRITICAL), ++ GATE_CLK(SCU1_CLK_GATE_UHCICLK, CLK_GATE_ASPEED, "usbuartclk-gate", -1, ++ AST2755_SCU1_CLK_STOP, 12, 0), ++ GATE_CLK(SCU1_CLK_GATE_CANCLK, CLK_GATE_ASPEED, "canclk-gate", SCU1_CLK_CAN, ++ AST2755_SCU1_CLK_STOP, 13, 0), ++ GATE_CLK(SCU1_CLK_GATE_PCICLK, CLK_GATE_ASPEED, "pciclk-gate", -1, ++ AST2755_SCU1_CLK_STOP, 14, 0), ++ GATE_CLK(SCU1_CLK_GATE_SLICLK, CLK_GATE_ASPEED, "soc1-sliclk-gate", -1, ++ AST2755_SCU1_CLK_STOP, 15, CLK_IS_CRITICAL), ++ GATE_CLK(SCU1_CLK_GATE_E2MCLK, CLK_GATE_ASPEED, "soc1-e2m-gate", -1, ++ AST2755_SCU1_CLK_STOP, 16, CLK_IS_CRITICAL), ++ GATE_CLK(SCU1_CLK_GATE_PORTCUSB2CLK, CLK_GATE_ASPEED, "portcusb2-gate", -1, ++ AST2755_SCU1_CLK_STOP, 17, 0), ++ GATE_CLK(SCU1_CLK_GATE_PORTDUSB2CLK, CLK_GATE_ASPEED, "portdusb2-gate", -1, ++ AST2755_SCU1_CLK_STOP, 18, 0), ++ GATE_CLK(SCU1_CLK_GATE_LTPI1TXCLK, CLK_GATE_ASPEED, "ltp1tx-gate", -1, ++ AST2755_SCU1_CLK_STOP, 19, 0), ++}; ++ ++static struct clk_hw *ast2700_clk_hw_register_fixed_display(void __iomem *reg, const char *name, ++ struct ast2700_clk_ctrl *clk_ctrl) ++{ ++ unsigned int mult, div, r, n; ++ u32 xdclk; ++ u32 val; ++ ++ val = readl(clk_ctrl->base + SCU0_CLK_SEL2); ++ if (val & BIT(29)) ++ xdclk = 800 * HZ_PER_MHZ; ++ else ++ xdclk = 1000 * HZ_PER_MHZ; ++ ++ val = readl(reg); ++ r = val & GENMASK(15, 0); ++ n = (val >> 16) & GENMASK(15, 0); ++ mult = r; ++ div = 2 * n; ++ ++ return devm_clk_hw_register_fixed_rate(clk_ctrl->dev, name, NULL, 0, (xdclk * mult) / div); ++} ++ ++static struct clk_hw *ast2700_clk_hw_register_hpll(void __iomem *reg, ++ const char *name, const struct clk_hw *parent_hw, ++ struct ast2700_clk_ctrl *clk_ctrl) ++{ ++ unsigned int mult, div; ++ u32 val; ++ ++ val = readl(clk_ctrl->base + SCU0_HWSTRAP1); ++ if 
((readl(clk_ctrl->base) & REVISION_ID) && (val & BIT(3))) { ++ switch ((val & GENMASK(4, 2)) >> 2) { ++ case 2: ++ return devm_clk_hw_register_fixed_rate(clk_ctrl->dev, name, NULL, ++ 0, 1800 * HZ_PER_MHZ); ++ case 3: ++ return devm_clk_hw_register_fixed_rate(clk_ctrl->dev, name, NULL, ++ 0, 1700 * HZ_PER_MHZ); ++ case 6: ++ return devm_clk_hw_register_fixed_rate(clk_ctrl->dev, name, NULL, ++ 0, 1200 * HZ_PER_MHZ); ++ case 7: ++ return devm_clk_hw_register_fixed_rate(clk_ctrl->dev, name, NULL, ++ 0, 800 * HZ_PER_MHZ); ++ default: ++ return ERR_PTR(-EINVAL); ++ } ++ } else if ((val & GENMASK(3, 2)) != 0) { ++ switch ((val & GENMASK(3, 2)) >> 2) { ++ case 1: ++ return devm_clk_hw_register_fixed_rate(clk_ctrl->dev, name, NULL, ++ 0, 1900 * HZ_PER_MHZ); ++ case 2: ++ return devm_clk_hw_register_fixed_rate(clk_ctrl->dev, name, NULL, ++ 0, 1800 * HZ_PER_MHZ); ++ case 3: ++ return devm_clk_hw_register_fixed_rate(clk_ctrl->dev, name, NULL, ++ 0, 1700 * HZ_PER_MHZ); ++ default: ++ return ERR_PTR(-EINVAL); ++ } ++ } else { ++ val = readl(reg); ++ ++ if (val & BIT(24)) { ++ /* Pass through mode */ ++ mult = 1; ++ div = 1; ++ } else { ++ u32 m = val & 0x1fff; ++ u32 n = (val >> 13) & 0x3f; ++ u32 p = (val >> 19) & 0xf; ++ ++ mult = (m + 1) / (2 * (n + 1)); ++ div = p + 1; ++ } ++ } ++ ++ return devm_clk_hw_register_fixed_factor_parent_hw(clk_ctrl->dev, name, ++ parent_hw, 0, mult, div); ++} ++ ++static struct clk_hw *ast2700_clk_hw_register_pll(int clk_idx, void __iomem *reg, ++ const char *name, const struct clk_hw *parent_hw, ++ struct ast2700_clk_ctrl *clk_ctrl) ++{ ++ int scu = clk_ctrl->clk_data->scu; ++ unsigned int mult, div; ++ u32 val = readl(reg); ++ ++ if (val & BIT(24)) { ++ /* Pass through mode */ ++ mult = 1; ++ div = 1; ++ } else { ++ u32 m = val & 0x1fff; ++ u32 n = (val >> 13) & 0x3f; ++ u32 p = (val >> 19) & 0xf; ++ ++ if (scu) { ++ mult = (m + 1) / (n + 1); ++ div = p + 1; ++ } else { ++ if (clk_idx == SCU0_CLK_MPLL) { ++ mult = m / (n + 1); ++ div = p + 1; ++ } else { ++ mult = (m + 1) / (2 * (n + 1)); ++ div = p + 1; ++ } ++ } ++ } ++ ++ return devm_clk_hw_register_fixed_factor_parent_hw(clk_ctrl->dev, name, ++ parent_hw, 0, mult, div); ++} ++ ++static struct clk_hw *ast2700_clk_hw_register_uartpll(void __iomem *reg, const char *name, ++ const struct clk_hw *parent_hw, ++ struct ast2700_clk_ctrl *clk_ctrl) ++{ ++ unsigned int mult, div; ++ u32 val = readl(reg); ++ u32 r = val & 0xff; ++ u32 n = (val >> 8) & 0x3ff; ++ ++ mult = r; ++ div = n * 2; ++ ++ return devm_clk_hw_register_fixed_factor_parent_hw(clk_ctrl->dev, name, ++ parent_hw, 0, mult, div); ++} ++ ++static struct clk_hw *ast2700_clk_hw_register_misc(int clk_idx, void __iomem *reg, ++ const char *name, const struct clk_hw *parent_hw, ++ struct ast2700_clk_ctrl *clk_ctrl) ++{ ++ u32 div = 0; ++ ++ if (clk_idx == SCU0_CLK_MPHY) { ++ div = readl(reg) + 1; ++ } else if (clk_idx == SCU0_CLK_U2PHY_REFCLK) { ++ if (readl(clk_ctrl->base) & REVISION_ID) ++ div = (GET_USB_REFCLK_DIV(readl(reg)) + 1) << 4; ++ else ++ div = (GET_USB_REFCLK_DIV(readl(reg)) + 1) << 1; ++ } else { ++ return ERR_PTR(-EINVAL); ++ } ++ ++ return devm_clk_hw_register_fixed_factor_parent_hw(clk_ctrl->dev, name, ++ parent_hw, 0, 1, div); ++} ++ ++static int ast2700_clk_is_enabled(struct clk_hw *hw) ++{ ++ struct clk_gate *gate = to_clk_gate(hw); ++ u32 clk = BIT(gate->bit_idx); ++ u32 reg; ++ ++ reg = readl(gate->reg); ++ ++ return !(reg & clk); ++} ++ ++static int ast2700_clk_enable(struct clk_hw *hw) ++{ ++ struct clk_gate *gate = to_clk_gate(hw); ++ u32 
clk = BIT(gate->bit_idx); ++ ++ if (readl(gate->reg) & clk) ++ writel(clk, gate->reg + 0x04); ++ ++ return 0; ++} ++ ++static void ast2700_clk_disable(struct clk_hw *hw) ++{ ++ struct clk_gate *gate = to_clk_gate(hw); ++ u32 clk = BIT(gate->bit_idx); ++ ++ /* Clock is set to enable, so use write to set register */ ++ writel(clk, gate->reg); ++} ++ ++static const struct clk_ops ast2700_clk_gate_ops = { ++ .enable = ast2700_clk_enable, ++ .disable = ast2700_clk_disable, ++ .is_enabled = ast2700_clk_is_enabled, ++}; ++ ++static struct clk_hw *ast2700_clk_hw_register_gate(struct device *dev, const char *name, ++ const struct clk_hw *parent_hw, ++ void __iomem *reg, u8 clock_idx, ++ unsigned long flags, spinlock_t *lock) ++{ ++ struct clk_init_data init; ++ struct clk_gate *gate; ++ struct clk_hw *hw; ++ int ret = -EINVAL; ++ ++ gate = kzalloc(sizeof(*gate), GFP_KERNEL); ++ if (!gate) ++ return ERR_PTR(-ENOMEM); ++ ++ init.name = name; ++ init.ops = &ast2700_clk_gate_ops; ++ init.flags = flags; ++ init.parent_names = NULL; ++ init.parent_hws = parent_hw ? &parent_hw : NULL; ++ init.parent_data = NULL; ++ init.num_parents = parent_hw ? 1 : 0; ++ ++ gate->reg = reg; ++ gate->bit_idx = clock_idx; ++ gate->flags = 0; ++ gate->lock = lock; ++ gate->hw.init = &init; ++ ++ hw = &gate->hw; ++ ret = clk_hw_register(dev, hw); ++ if (ret) { ++ kfree(gate); ++ hw = ERR_PTR(ret); ++ } ++ ++ return hw; ++} ++ ++static void ast2700_soc1_configure_mac01_clk(struct ast2700_clk_ctrl *clk_ctrl) ++{ ++ struct device_node *np = clk_ctrl->dev->of_node; ++ struct mac_delay_config mac_cfg; ++ u32 reg[3]; ++ int ret; ++ ++ if (readl(clk_ctrl->base) & REVISION_ID) { ++ if ((readl(clk_ctrl->base + SCU1_MAC12_CLK_DLY) & GENMASK(25, 0)) == 0) ++ reg[0] = AST2700_DEF_MAC12_DELAY_1G_A1; ++ else ++ reg[0] = readl(clk_ctrl->base + SCU1_MAC12_CLK_DLY); ++ } else { ++ reg[0] = AST2700_DEF_MAC12_DELAY_1G_A0; ++ } ++ reg[1] = AST2700_DEF_MAC12_DELAY_100M; ++ reg[2] = AST2700_DEF_MAC12_DELAY_10M; ++ ++ ret = of_property_read_u32_array(np, "mac0-clk-delay", (u32 *)&mac_cfg, ++ sizeof(mac_cfg) / sizeof(u32)); ++ if (!ret) { ++ reg[0] &= ~(MAC_CLK_1G_INPUT_DELAY_1 | MAC_CLK_1G_OUTPUT_DELAY_1); ++ reg[0] |= FIELD_PREP(MAC_CLK_1G_INPUT_DELAY_1, mac_cfg.rx_delay_1000) | ++ FIELD_PREP(MAC_CLK_1G_OUTPUT_DELAY_1, mac_cfg.tx_delay_1000); ++ ++ reg[1] &= ~(MAC_CLK_100M_10M_INPUT_DELAY_1 | MAC_CLK_100M_10M_OUTPUT_DELAY_1); ++ reg[1] |= FIELD_PREP(MAC_CLK_100M_10M_INPUT_DELAY_1, mac_cfg.rx_delay_100) | ++ FIELD_PREP(MAC_CLK_100M_10M_OUTPUT_DELAY_1, mac_cfg.tx_delay_100); ++ ++ reg[2] &= ~(MAC_CLK_100M_10M_INPUT_DELAY_1 | MAC_CLK_100M_10M_OUTPUT_DELAY_1); ++ reg[2] |= FIELD_PREP(MAC_CLK_100M_10M_INPUT_DELAY_1, mac_cfg.rx_delay_10) | ++ FIELD_PREP(MAC_CLK_100M_10M_OUTPUT_DELAY_1, mac_cfg.tx_delay_10); ++ } ++ ++ ret = of_property_read_u32_array(np, "mac1-clk-delay", (u32 *)&mac_cfg, ++ sizeof(mac_cfg) / sizeof(u32)); ++ if (!ret) { ++ reg[0] &= ~(MAC_CLK_1G_INPUT_DELAY_2 | MAC_CLK_1G_OUTPUT_DELAY_2); ++ reg[0] |= FIELD_PREP(MAC_CLK_1G_INPUT_DELAY_2, mac_cfg.rx_delay_1000) | ++ FIELD_PREP(MAC_CLK_1G_OUTPUT_DELAY_2, mac_cfg.tx_delay_1000); ++ ++ reg[1] &= ~(MAC_CLK_100M_10M_INPUT_DELAY_2 | MAC_CLK_100M_10M_OUTPUT_DELAY_2); ++ reg[1] |= FIELD_PREP(MAC_CLK_100M_10M_INPUT_DELAY_2, mac_cfg.rx_delay_100) | ++ FIELD_PREP(MAC_CLK_100M_10M_OUTPUT_DELAY_2, mac_cfg.tx_delay_100); ++ ++ reg[2] &= ~(MAC_CLK_100M_10M_INPUT_DELAY_2 | MAC_CLK_100M_10M_OUTPUT_DELAY_2); ++ reg[2] |= FIELD_PREP(MAC_CLK_100M_10M_INPUT_DELAY_2, mac_cfg.rx_delay_10) | ++ 
FIELD_PREP(MAC_CLK_100M_10M_OUTPUT_DELAY_2, mac_cfg.tx_delay_10); ++ } ++ ++ reg[0] |= (readl(clk_ctrl->base + SCU1_MAC12_CLK_DLY) & ~GENMASK(25, 0)); ++ writel(reg[0], clk_ctrl->base + SCU1_MAC12_CLK_DLY); ++ writel(reg[1], clk_ctrl->base + SCU1_MAC12_CLK_DLY_100M); ++ writel(reg[2], clk_ctrl->base + SCU1_MAC12_CLK_DLY_10M); ++} ++ ++static void ast2700_soc1_configure_i3c_clk(struct ast2700_clk_ctrl *clk_ctrl) ++{ ++ if (readl(clk_ctrl->base) & REVISION_ID) { ++ u32 val; ++ ++ /* I3C 250MHz = HPLL/4 */ ++ val = readl(clk_ctrl->base + SCU1_CLK_SEL2) & ~SCU1_CLK_I3C_DIV_MASK; ++ val |= FIELD_PREP(SCU1_CLK_I3C_DIV_MASK, SCU1_CLK_I3C_DIV(4)); ++ writel(val, clk_ctrl->base + SCU1_CLK_SEL2); ++ } ++} ++ ++static inline const struct clk_hw *get_parent_hw_or_null(struct clk_hw **hws, int idx) ++{ ++ if (idx < 0) ++ return NULL; ++ else ++ return hws[idx]; ++} ++ ++static int ast2700_soc_clk_probe(struct platform_device *pdev) ++{ ++ const struct ast2700_clk_data *clk_data; ++ struct clk_hw_onecell_data *clk_hw_data; ++ struct ast2700_clk_ctrl *clk_ctrl; ++ struct device *dev = &pdev->dev; ++ u32 uart_clk_source = 0; ++ void __iomem *clk_base; ++ struct clk_hw **hws; ++ char *reset_name; ++ int ret; ++ int i; ++ ++ clk_ctrl = devm_kzalloc(dev, sizeof(*clk_ctrl), GFP_KERNEL); ++ if (!clk_ctrl) ++ return -ENOMEM; ++ clk_ctrl->dev = dev; ++ dev_set_drvdata(&pdev->dev, clk_ctrl); ++ ++ spin_lock_init(&clk_ctrl->lock); ++ ++ clk_base = devm_platform_ioremap_resource(pdev, 0); ++ if (IS_ERR(clk_base)) ++ return PTR_ERR(clk_base); ++ ++ clk_ctrl->base = clk_base; ++ ++ clk_data = (struct ast2700_clk_data *)device_get_match_data(dev); ++ if (!clk_data) ++ return -ENODEV; ++ ++ clk_ctrl->clk_data = clk_data; ++ reset_name = devm_kasprintf(dev, GFP_KERNEL, "reset%d", clk_data->scu); ++ ++ clk_hw_data = devm_kzalloc(dev, struct_size(clk_hw_data, hws, clk_data->nr_clks), ++ GFP_KERNEL); ++ if (!clk_hw_data) ++ return -ENOMEM; ++ ++ clk_hw_data->num = clk_data->nr_clks; ++ hws = clk_hw_data->hws; ++ ++ if (clk_data->scu) { ++ of_property_read_u32(dev->of_node, "uart-clk-source", &uart_clk_source); ++ if (uart_clk_source) { ++ u32 val = readl(clk_base + SCU1_CLK_SEL1) & ~GENMASK(12, 0); ++ ++ uart_clk_source &= GENMASK(12, 0); ++ writel(val | uart_clk_source, clk_base + SCU1_CLK_SEL1); ++ } ++ ++ ast2700_soc1_configure_mac01_clk(clk_ctrl); ++ ast2700_soc1_configure_i3c_clk(clk_ctrl); ++ } ++ ++ for (i = 0; i < clk_data->nr_clks; i++) { ++ const struct ast2700_clk_info *clk = &clk_data->clk_info[i]; ++ const struct clk_hw *phw = NULL; ++ unsigned int id = clk->id; ++ void __iomem *reg = NULL; ++ ++ if (id >= clk_hw_data->num || hws[id]) { ++ dev_err(dev, "clk id %u invalid for %s\n", id, clk->name); ++ return -EINVAL; ++ } ++ ++ if (clk->type == CLK_FIXED) { ++ const struct ast2700_clk_fixed_rate_data *fixed_rate = &clk->data.rate; ++ ++ hws[id] = devm_clk_hw_register_fixed_rate(dev, clk->name, NULL, 0, ++ fixed_rate->fixed_rate); ++ } else if (clk->type == CLK_FIXED_FACTOR) { ++ const struct ast2700_clk_fixed_factor_data *factor = &clk->data.factor; ++ ++ phw = hws[factor->parent_id]; ++ hws[id] = devm_clk_hw_register_fixed_factor_parent_hw(dev, clk->name, ++ phw, 0, factor->mult, ++ factor->div); ++ } else if (clk->type == CLK_FIXED_DISPLAY) { ++ reg = clk_ctrl->base + clk->data.display_rate.reg; ++ ++ hws[id] = ast2700_clk_hw_register_fixed_display(reg, clk->name, clk_ctrl); ++ } else if (clk->type == CLK_HPLL) { ++ const struct ast2700_clk_pll_data *pll = &clk->data.pll; ++ ++ reg = clk_ctrl->base + 
pll->reg; ++ phw = hws[pll->parent_id]; ++ hws[id] = ast2700_clk_hw_register_hpll(reg, clk->name, phw, clk_ctrl); ++ } else if (clk->type == CLK_PLL) { ++ const struct ast2700_clk_pll_data *pll = &clk->data.pll; ++ ++ reg = clk_ctrl->base + pll->reg; ++ phw = hws[pll->parent_id]; ++ hws[id] = ast2700_clk_hw_register_pll(id, reg, clk->name, phw, clk_ctrl); ++ } else if (clk->type == CLK_UART_PLL) { ++ const struct ast2700_clk_pll_data *pll = &clk->data.pll; ++ ++ reg = clk_ctrl->base + pll->reg; ++ phw = hws[pll->parent_id]; ++ hws[id] = ast2700_clk_hw_register_uartpll(reg, clk->name, phw, clk_ctrl); ++ } else if (clk->type == CLK_MUX) { ++ const struct ast2700_clk_mux_data *mux = &clk->data.mux; ++ ++ reg = clk_ctrl->base + mux->reg; ++ for (int j = 0; j < mux->num_parents; j++) { ++ unsigned int pid = mux->parent_ids[j]; ++ ++ mux->parent_hws[j] = hws[pid]; ++ } ++ ++ hws[id] = devm_clk_hw_register_mux_parent_hws(dev, clk->name, ++ mux->parent_hws, ++ mux->num_parents, 0, ++ reg, mux->bit_shift, ++ mux->bit_width, 0, ++ &clk_ctrl->lock); ++ } else if (clk->type == CLK_MISC) { ++ const struct ast2700_clk_pll_data *pll = &clk->data.pll; ++ ++ reg = clk_ctrl->base + pll->reg; ++ phw = hws[pll->parent_id]; ++ hws[id] = ast2700_clk_hw_register_misc(id, reg, clk->name, phw, clk_ctrl); ++ } else if (clk->type == CLK_DIVIDER) { ++ const struct ast2700_clk_div_data *divider = &clk->data.div; ++ ++ reg = clk_ctrl->base + divider->reg; ++ phw = hws[divider->parent_id]; ++ hws[id] = clk_hw_register_divider_table_parent_hw(dev, clk->name, ++ phw, ++ 0, reg, ++ divider->bit_shift, ++ divider->bit_width, 0, ++ divider->div_table, ++ &clk_ctrl->lock); ++ } else if (clk->type == CLK_GATE_ASPEED) { ++ const struct ast2700_clk_gate_data *gate = &clk->data.gate; ++ ++ phw = get_parent_hw_or_null(hws, gate->parent_id); ++ reg = clk_ctrl->base + gate->reg; ++ hws[id] = ast2700_clk_hw_register_gate(dev, clk->name, phw, reg, gate->bit, ++ gate->flags, &clk_ctrl->lock); ++ } else { ++ const struct ast2700_clk_gate_data *gate = &clk->data.gate; ++ ++ phw = get_parent_hw_or_null(hws, gate->parent_id); ++ reg = clk_ctrl->base + gate->reg; ++ hws[id] = devm_clk_hw_register_gate_parent_hw(dev, clk->name, phw, ++ gate->flags, reg, gate->bit, ++ 0, &clk_ctrl->lock); ++ } ++ ++ if (IS_ERR(hws[id])) ++ return PTR_ERR(hws[id]); ++ } ++ ++ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_hw_data); ++ if (ret) ++ return ret; ++ ++ return aspeed_reset_controller_register(dev, clk_base, reset_name); ++} ++ ++static const struct ast2700_clk_data ast2700_clk0_data = { ++ .scu = 0, ++ .clk_info = ast2700_scu0_clk_info, ++ .nr_clks = ARRAY_SIZE(ast2700_scu0_clk_info), ++}; ++ ++static const struct ast2700_clk_data ast2700_clk1_data = { ++ .scu = 1, ++ .clk_info = ast2700_scu1_clk_info, ++ .nr_clks = ARRAY_SIZE(ast2700_scu1_clk_info), ++}; ++ ++static const struct ast2700_clk_data ast2755_clk1_data = { ++ .scu = 2, ++ .clk_info = ast2755_scu1_clk_info, ++ .nr_clks = ARRAY_SIZE(ast2755_scu1_clk_info), ++}; ++ ++static const struct of_device_id ast2700_scu_match[] = { ++ { .compatible = "aspeed,ast2700-scu0", .data = &ast2700_clk0_data }, ++ { .compatible = "aspeed,ast2700-scu1", .data = &ast2700_clk1_data }, ++ { .compatible = "aspeed,ast2755-scu1", .data = &ast2755_clk1_data }, ++ { /* sentinel */ } ++}; ++ ++MODULE_DEVICE_TABLE(of, ast2700_scu_match); ++ ++static struct platform_driver ast2700_scu_driver = { ++ .probe = ast2700_soc_clk_probe, ++ .driver = { ++ .name = "clk-ast2700", ++ .of_match_table = 
ast2700_scu_match, ++ }, ++}; ++ ++static int __init clk_ast2700_init(void) ++{ ++ return platform_driver_register(&ast2700_scu_driver); ++} ++arch_initcall(clk_ast2700_init); +diff --git a/drivers/crypto/aspeed/Kconfig b/drivers/crypto/aspeed/Kconfig +--- a/drivers/crypto/aspeed/Kconfig 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/crypto/aspeed/Kconfig 2025-12-23 10:16:21.140032401 +0000 +@@ -1,26 +1,32 @@ + config CRYPTO_DEV_ASPEED +- tristate "Support for Aspeed cryptographic engine driver" ++ bool "Support for Aspeed cryptographic engine driver" + depends on ARCH_ASPEED || COMPILE_TEST + select CRYPTO_ENGINE + help +- Hash and Crypto Engine (HACE) is designed to accelerate the +- throughput of hash data digest, encryption and decryption. ++ Say Y here to get to see options for Aspeed hardware crypto devices + +- Select y here to have support for the cryptographic driver +- available on Aspeed SoC. ++if CRYPTO_DEV_ASPEED + + config CRYPTO_DEV_ASPEED_DEBUG + bool "Enable Aspeed crypto debug messages" +- depends on CRYPTO_DEV_ASPEED + help + Print Aspeed crypto debugging messages if you use this + option to ask for those messages. + Avoid enabling this option for production build to + minimize driver timing. + ++config CRYPTO_DEV_ASPEED_HACE ++ tristate "Enable Aspeed Hash & Crypto Engine (HACE) Engine" ++ help ++ Hash and Crypto Engine (HACE) is designed to accelerate the ++ throughput of hash data digest, encryption and decryption. ++ ++ Select y here to have support for the cryptographic driver ++ available on Aspeed SoC. ++ + config CRYPTO_DEV_ASPEED_HACE_HASH + bool "Enable Aspeed Hash & Crypto Engine (HACE) hash" +- depends on CRYPTO_DEV_ASPEED ++ depends on CRYPTO_DEV_ASPEED_HACE + select CRYPTO_SHA1 + select CRYPTO_SHA256 + select CRYPTO_SHA512 +@@ -33,25 +39,56 @@ + + config CRYPTO_DEV_ASPEED_HACE_CRYPTO + bool "Enable Aspeed Hash & Crypto Engine (HACE) crypto" +- depends on CRYPTO_DEV_ASPEED ++ depends on CRYPTO_DEV_ASPEED_HACE + select CRYPTO_AES + select CRYPTO_DES + select CRYPTO_ECB + select CRYPTO_CBC ++ select CRYPTO_CFB ++ select CRYPTO_OFB + select CRYPTO_CTR + help + Select here to enable Aspeed Hash & Crypto Engine (HACE) + crypto driver. + Supports AES/DES symmetric-key encryption and decryption +- with ECB/CBC/CTR options. ++ with ECB/CBC/CFB/OFB/CTR options. + + config CRYPTO_DEV_ASPEED_ACRY +- bool "Enable Aspeed ACRY RSA Engine" +- depends on CRYPTO_DEV_ASPEED +- select CRYPTO_ENGINE ++ tristate "Enable Aspeed ACRY RSA Engine" ++ depends on MACH_ASPEED_G6 + select CRYPTO_RSA + help + Select here to enable Aspeed ECC/RSA Engine (ACRY) + RSA driver. + Supports 256 bits to 4096 bits RSA encryption/decryption + and signature/verification. ++ ++config CRYPTO_DEV_ASPEED_RSSS ++ tristate "Enable Aspeed RSSS Engine" ++ depends on ARCH_ASPEED ++ select CRYPTO_RSA ++ select CRYPTO_SHA3 ++ select CRYPTO_SM3 ++ select CRYPTO_SM4 ++ help ++ Select here to enable Aspeed RSSS Engine driver. ++ Supports RSA 512 to 4096 bits encryption/decryption and ++ signature/verification, SHA3-224/256/384/512 and XOF of ++ SHAKE 128/256, SM3 Hash crypto, SM4 ECB/CBC/CFB/OFB/CTR ++ cryptographic algorithms. ++ It's a new hardware design from ast2700 for simply SRAM ++ layout. ++ ++config CRYPTO_DEV_ASPEED_ECDSA ++ tristate "Enable Aspeed ECDSA Engine" ++ depends on ARCH_ASPEED ++ select CRYPTO_ECC ++ select CRYPTO_ECDSA ++ select CRYPTO_AKCIPHER ++ help ++ Select here to enable Aspeed ECC Engine for ECDSA driver. 
++ Supports ECDSA (Elliptic Curve Digital Signature Algorithm) ++ using curves P-256, P-384. ++ Only signature verification is implemented. ++ ++endif #CRYPTO_DEV_ASPEED +diff --git a/drivers/crypto/aspeed/Makefile b/drivers/crypto/aspeed/Makefile +--- a/drivers/crypto/aspeed/Makefile 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/crypto/aspeed/Makefile 2025-12-23 10:16:21.140032401 +0000 +@@ -1,11 +1,14 @@ + hace-hash-$(CONFIG_CRYPTO_DEV_ASPEED_HACE_HASH) := aspeed-hace-hash.o + hace-crypto-$(CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO) := aspeed-hace-crypto.o + +-obj-$(CONFIG_CRYPTO_DEV_ASPEED) += aspeed_crypto.o ++obj-$(CONFIG_CRYPTO_DEV_ASPEED_HACE) += aspeed_crypto.o + aspeed_crypto-objs := aspeed-hace.o \ + $(hace-hash-y) \ + $(hace-crypto-y) + +-aspeed_acry-$(CONFIG_CRYPTO_DEV_ASPEED_ACRY) += aspeed-acry.o ++obj-$(CONFIG_CRYPTO_DEV_ASPEED_ACRY) += aspeed-acry.o + +-obj-$(CONFIG_CRYPTO_DEV_ASPEED) += $(aspeed_acry-y) ++obj-$(CONFIG_CRYPTO_DEV_ASPEED_RSSS) += aspeed_rsss.o ++aspeed_rsss-objs := aspeed-rsss.o aspeed-rsss-rsa.o aspeed-rsss-hash.o ++ ++obj-$(CONFIG_CRYPTO_DEV_ASPEED_ECDSA) += aspeed-ecdsa.o +diff --git a/drivers/crypto/aspeed/aspeed-acry.c b/drivers/crypto/aspeed/aspeed-acry.c +--- a/drivers/crypto/aspeed/aspeed-acry.c 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/crypto/aspeed/aspeed-acry.c 2025-12-23 10:16:21.140032401 +0000 +@@ -808,7 +808,7 @@ + + static struct platform_driver aspeed_acry_driver = { + .probe = aspeed_acry_probe, +- .remove_new = aspeed_acry_remove, ++ .remove = aspeed_acry_remove, + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = aspeed_acry_of_matches, +diff --git a/drivers/crypto/aspeed/aspeed-ecdsa.c b/drivers/crypto/aspeed/aspeed-ecdsa.c +--- a/drivers/crypto/aspeed/aspeed-ecdsa.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/crypto/aspeed/aspeed-ecdsa.c 2025-12-23 10:16:21.140032401 +0000 +@@ -0,0 +1,779 @@ ++// SPDX-License-Identifier: GPL-2.0+ ++/* ++ * Copyright 2023 Aspeed Technology Inc. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "aspeed-ecdsa.h" ++ ++//#define ASPEED_ECDSA_IRQ_MODE ++ ++static int aspeed_ecdsa_self_test(struct aspeed_ecdsa_dev *ecdsa_dev) ++{ ++ u32 val; ++ ++ ast_write(ecdsa_dev, ECC_EN, ASPEED_ECC_CTRL_REG); ++ val = ast_read(ecdsa_dev, ASPEED_ECC_CTRL_REG); ++ if (val != ECC_EN) ++ return -EIO; ++ ++ ast_write(ecdsa_dev, 0x0, ASPEED_ECC_CTRL_REG); ++ val = ast_read(ecdsa_dev, ASPEED_ECC_CTRL_REG); ++ if (val) ++ return -EIO; ++ ++ return 0; ++} ++ ++static inline struct akcipher_request * ++ akcipher_request_cast(struct crypto_async_request *req) ++{ ++ return container_of(req, struct akcipher_request, base); ++} ++ ++#ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG ++static void hexdump(const char *name, unsigned char *buf, unsigned int len) ++{ ++ pr_info("%s\n", name); ++ print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET, ++ 16, 1, ++ buf, len, false); ++} ++#else ++static void hexdump(const char *name, unsigned char *buf, unsigned int len) ++{ ++ /* empty */ ++} ++#endif ++ ++static void buff_reverse(u8 *dst, u8 *src, int len) ++{ ++ for (int i = 0; i < len; i++) ++ dst[len - i - 1] = src[i]; ++} ++ ++static bool aspeed_ecdsa_need_fallback(struct aspeed_ecc_ctx *ctx, int d_len) ++{ ++ int curve_id = ctx->curve_id; ++ ++ switch (curve_id) { ++ case ECC_CURVE_NIST_P256: ++ if (d_len != SHA256_DIGEST_SIZE) ++ return true; ++ break; ++ case ECC_CURVE_NIST_P384: ++ if (d_len != SHA384_DIGEST_SIZE) ++ return true; ++ break; ++ } ++ ++ return false; ++} ++ ++#ifndef ASPEED_ECDSA_IRQ_MODE ++static int aspeed_ecdsa_wait_complete(struct aspeed_ecdsa_dev *ecdsa_dev) ++{ ++ struct aspeed_engine_ecdsa *ecdsa_engine = &ecdsa_dev->ecdsa_engine; ++ u32 sts; ++ int ret; ++ ++ ret = readl_poll_timeout(ecdsa_dev->regs + ASPEED_ECC_STS_REG, sts, ++ ((sts & ECC_IDLE) == ECC_IDLE), ++ ASPEED_ECC_POLLING_TIME, ++ ASPEED_ECC_TIMEOUT * 10); ++ if (ret) { ++ dev_err(ecdsa_dev->dev, "ECC engine wrong status\n"); ++ return -EIO; ++ } ++ ++ sts = ast_read(ecdsa_dev, ASPEED_ECC_STS_REG) & ECC_VERIFY_PASS; ++ if (sts == ECC_VERIFY_PASS) { ++ ecdsa_engine->results = 0; ++ AST_DBG(ecdsa_dev, "Verify PASS !\n"); ++ ++ } else { ++ ecdsa_engine->results = -EKEYREJECTED; ++ AST_DBG(ecdsa_dev, "Verify FAILED !\n"); ++ } ++ ++ /* Stop ECDSA engine */ ++ if (ecdsa_engine->flags & CRYPTO_FLAGS_BUSY) ++ tasklet_schedule(&ecdsa_engine->done_task); ++ else ++ dev_err(ecdsa_dev->dev, "ECDSA no active requests.\n"); ++ ++ return ecdsa_engine->results; ++} ++#endif ++ ++static int aspeed_hw_trigger(struct aspeed_ecdsa_dev *ecdsa_dev) ++{ ++ AST_DBG(ecdsa_dev, "\n"); ++ ++ ast_write(ecdsa_dev, 0x1, ASPEED_ECC_ECDSA_VERIFY); ++ ++ ast_write(ecdsa_dev, ECC_EN, ASPEED_ECC_CMD_REG); ++ ast_write(ecdsa_dev, 0x0, ASPEED_ECC_CMD_REG); ++ ++#ifdef ASPEED_ECDSA_IRQ_MODE ++ return 0; ++#else ++ return aspeed_ecdsa_wait_complete(ecdsa_dev); ++#endif ++} ++ ++static int _aspeed_ecdsa_verify(struct aspeed_ecc_ctx *ctx, const u64 *hash, ++ const u64 *r, const u64 *s) ++{ ++ int nbytes = ctx->curve->g.ndigits << ECC_DIGITS_TO_BYTES_SHIFT; ++ const struct ecc_curve *curve = ctx->curve; ++ void __iomem *base = ctx->ecdsa_dev->regs; ++ unsigned int ndigits = curve->g.ndigits; ++ u8 *data, *buf; ++ ++ /* 0 < r < n and 0 < s < n */ ++ if (vli_is_zero(r, ndigits) || vli_cmp(r, curve->n, ndigits) >= 0 || ++ 
vli_is_zero(s, ndigits) || vli_cmp(s, curve->n, ndigits) >= 0) ++ return -EBADMSG; ++ ++ /* hash is given */ ++ AST_DBG(ctx->ecdsa_dev, "hash : %016llx %016llx ... %016llx\n", ++ hash[ndigits - 1], hash[ndigits - 2], hash[0]); ++ ++ data = vmalloc(nbytes); ++ if (!data) ++ return -ENOMEM; ++ ++ /* Initial signature/message and trigger ecdsa verification */ ++ buf = (u8 *)r; ++ hexdump("Dump r:", buf, nbytes); ++ ++ buff_reverse(data, (u8 *)r, nbytes); ++ memcpy_toio(base + ASPEED_ECC_SIGN_R_REG, data, nbytes); ++ ++ buf = (u8 *)s; ++ hexdump("Dump s:", buf, nbytes); ++ ++ buff_reverse(data, (u8 *)s, nbytes); ++ memcpy_toio(base + ASPEED_ECC_SIGN_S_REG, data, nbytes); ++ ++ buf = (u8 *)hash; ++ hexdump("Dump m:", buf, nbytes); ++ ++ buff_reverse(data, (u8 *)hash, nbytes); ++ memcpy_toio(base + ASPEED_ECC_MESSAGE_REG, data, nbytes); ++ ++ vfree(data); ++ ++ return aspeed_hw_trigger(ctx->ecdsa_dev); ++} ++ ++static int aspeed_ecdsa_handle_queue(struct aspeed_ecdsa_dev *ecdsa_dev, ++ struct akcipher_request *req) ++{ ++ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); ++ struct aspeed_ecc_ctx *ctx = akcipher_tfm_ctx(tfm); ++ int ret; ++ ++ if (aspeed_ecdsa_need_fallback(ctx, req->dst_len)) { ++ AST_DBG(ctx->ecdsa_dev, "SW fallback\n"); ++ ++ akcipher_request_set_tfm(req, ctx->fallback_tfm); ++ ret = crypto_akcipher_verify(req); ++ akcipher_request_set_tfm(req, tfm); ++ ++ AST_DBG(ctx->ecdsa_dev, "SW verify...ret:0x%x\n", ret); ++ ++ return ret; ++ } ++ ++ return crypto_transfer_akcipher_request_to_engine(ecdsa_dev->crypt_engine_ecdsa, req); ++} ++ ++static int aspeed_ecdsa_trigger(struct aspeed_ecdsa_dev *ecdsa_dev) ++{ ++ struct aspeed_engine_ecdsa *ecdsa_engine = &ecdsa_dev->ecdsa_engine; ++ struct akcipher_request *req = ecdsa_engine->req; ++ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); ++ struct aspeed_ecc_ctx *ctx = akcipher_tfm_ctx(tfm); ++ size_t keylen = ctx->curve->g.ndigits * sizeof(u64); ++ struct ecdsa_signature_ctx sig_ctx = { ++ .curve = ctx->curve, ++ }; ++ u8 rawhash[ECC_MAX_BYTES]; ++ u64 hash[ECC_MAX_DIGITS]; ++ unsigned char *buffer; ++ ssize_t diff; ++ int ret; ++ ++ AST_DBG(ecdsa_dev, "\n"); ++ ++ if (unlikely(!ctx->pub_key_set)) ++ return -EINVAL; ++ ++ buffer = kmalloc(req->src_len + req->dst_len, GFP_KERNEL); ++ if (!buffer) ++ return -ENOMEM; ++ ++ /* Input src: signature + digest */ ++ sg_pcopy_to_buffer(req->src, sg_nents_for_len(req->src, req->src_len + req->dst_len), ++ buffer, req->src_len + req->dst_len, 0); ++ ++ ret = asn1_ber_decoder(&ecdsasignature_decoder, &sig_ctx, ++ buffer, req->src_len); ++ if (ret < 0) ++ goto error; ++ ++ /* if the hash is shorter then we will add leading zeros to fit to ndigits */ ++ diff = keylen - req->dst_len; ++ if (diff >= 0) { ++ if (diff) ++ memset(rawhash, 0, diff); ++ memcpy(&rawhash[diff], buffer + req->src_len, req->dst_len); ++ } else if (diff < 0) { ++ /* given hash is longer, we take the left-most bytes */ ++ memcpy(&rawhash, buffer + req->src_len, keylen); ++ } ++ ++ ecc_swap_digits((u64 *)rawhash, hash, ctx->curve->g.ndigits); ++ ++ ret = _aspeed_ecdsa_verify(ctx, hash, sig_ctx.r, sig_ctx.s); ++ ++error: ++ kfree(buffer); ++ ++ return ret; ++} ++ ++/* ++ * Verify an ECDSA signature. 
++ */ ++static int aspeed_ecdsa_verify(struct akcipher_request *req) ++{ ++ struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req); ++ struct aspeed_ecc_ctx *ctx = akcipher_tfm_ctx(cipher); ++ struct aspeed_ecdsa_dev *ecdsa_dev = ctx->ecdsa_dev; ++ ++ AST_DBG(ecdsa_dev, "\n"); ++ ++ ctx->trigger = aspeed_ecdsa_trigger; ++ ++ return aspeed_ecdsa_handle_queue(ecdsa_dev, req); ++} ++ ++static int aspeed_ecdsa_ecc_ctx_init(struct aspeed_ecc_ctx *ctx, unsigned int curve_id) ++{ ++ void __iomem *base = ctx->ecdsa_dev->regs; ++ u8 *data, *buf; ++ u32 ctrl; ++ int nbytes; ++ ++ ctx->curve_id = curve_id; ++ ctx->curve = ecc_get_curve(curve_id); ++ if (!ctx->curve) ++ return -EINVAL; ++ ++ nbytes = ctx->curve->g.ndigits << ECC_DIGITS_TO_BYTES_SHIFT; ++ ++ switch (curve_id) { ++ case ECC_CURVE_NIST_P256: ++ AST_DBG(ctx->ecdsa_dev, "curve ECC_CURVE_NIST_P256\n"); ++ ctrl = ECDSA_256_EN; ++ break; ++ case ECC_CURVE_NIST_P384: ++ AST_DBG(ctx->ecdsa_dev, "curve ECC_CURVE_NIST_P384\n"); ++ ctrl = ECDSA_384_EN; ++ break; ++ } ++ ++ mutex_lock(&ctx->ecdsa_dev->lock); ++ ++ ast_write(ctx->ecdsa_dev, ECC_EN | ctrl, ASPEED_ECC_CTRL_REG); ++ ++ /* Initial Curve: ecc point/p/a/n */ ++ data = vmalloc(nbytes); ++ if (!data) ++ return -ENOMEM; ++ ++ buf = (u8 *)ctx->curve->g.x; ++ hexdump("Dump Gx:", buf, nbytes); ++ ++ buff_reverse(data, (u8 *)ctx->curve->g.x, nbytes); ++ memcpy_toio(base + ASPEED_ECC_PAR_GX_REG, data, nbytes); ++ ++ buf = (u8 *)ctx->curve->g.y; ++ hexdump("Dump Gy:", buf, nbytes); ++ ++ buff_reverse(data, (u8 *)ctx->curve->g.y, nbytes); ++ memcpy_toio(base + ASPEED_ECC_PAR_GY_REG, data, nbytes); ++ ++ buf = (u8 *)ctx->curve->p; ++ hexdump("Dump P:", buf, nbytes); ++ ++ buff_reverse(data, (u8 *)ctx->curve->p, nbytes); ++ memcpy_toio(base + ASPEED_ECC_PAR_P_REG, data, nbytes); ++ ++ buf = (u8 *)ctx->curve->a; ++ hexdump("Dump A:", buf, nbytes); ++ ++ buff_reverse(data, (u8 *)ctx->curve->a, nbytes); ++ memcpy_toio(base + ASPEED_ECC_PAR_A_REG, data, nbytes); ++ ++ buf = (u8 *)ctx->curve->n; ++ hexdump("Dump N:", buf, nbytes); ++ ++ buff_reverse(data, (u8 *)ctx->curve->n, nbytes); ++ memcpy_toio(base + ASPEED_ECC_PAR_N_REG, data, nbytes); ++ ++ vfree(data); ++ return 0; ++} ++ ++static void aspeed_ecdsa_ecc_ctx_deinit(struct aspeed_ecc_ctx *ctx) ++{ ++ mutex_unlock(&ctx->ecdsa_dev->lock); ++ ++ ctx->pub_key_set = false; ++} ++ ++static void aspeed_ecdsa_ecc_ctx_reset(struct aspeed_ecc_ctx *ctx) ++{ ++ ctx->pub_key = ECC_POINT_INIT(ctx->x, ctx->y, ++ ctx->curve->g.ndigits); ++} ++ ++/* ++ * Set the public key given the raw uncompressed key data from an X509 ++ * certificate. The key data contain the concatenated X and Y coordinates of ++ * the public key. 
++ */ ++static int aspeed_ecdsa_set_pub_key(struct crypto_akcipher *tfm, const void *key, ++ unsigned int keylen) ++{ ++ struct aspeed_ecc_ctx *ctx = akcipher_tfm_ctx(tfm); ++ int nbytes = ctx->curve->g.ndigits << ECC_DIGITS_TO_BYTES_SHIFT; ++ void __iomem *base = ctx->ecdsa_dev->regs; ++ const unsigned char *d = key; ++ const u64 *digits = (const u64 *)&d[1]; ++ unsigned int ndigits; ++ u8 *data, *buf; ++ int ret; ++ ++ AST_DBG(ctx->ecdsa_dev, "\n"); ++ ++ ret = crypto_akcipher_set_pub_key(ctx->fallback_tfm, key, keylen); ++ if (ret) ++ return ret; ++ ++ aspeed_ecdsa_ecc_ctx_reset(ctx); ++ ++ if (keylen < 1 || (((keylen - 1) >> 1) % sizeof(u64)) != 0) ++ return -EINVAL; ++ /* we only accept uncompressed format indicated by '4' */ ++ if (d[0] != 4) ++ return -EINVAL; ++ ++ keylen--; ++ ndigits = (keylen >> 1) / sizeof(u64); ++ if (ndigits != ctx->curve->g.ndigits) ++ return -EINVAL; ++ ++ ecc_swap_digits(digits, ctx->pub_key.x, ndigits); ++ ecc_swap_digits(&digits[ndigits], ctx->pub_key.y, ndigits); ++ ret = ecc_is_pubkey_valid_full(ctx->curve, &ctx->pub_key); ++ ++ /* Set public key: Qx/Qy */ ++ data = vmalloc(nbytes); ++ if (!data) ++ return -ENOMEM; ++ ++ buf = (u8 *)ctx->pub_key.x; ++ hexdump("Dump Qx:", buf, nbytes); ++ ++ buff_reverse(data, (u8 *)ctx->pub_key.x, nbytes); ++ memcpy_toio(base + ASPEED_ECC_PAR_QX_REG, data, nbytes); ++ ++ buf = (u8 *)ctx->pub_key.y; ++ hexdump("Dump Qy:", buf, nbytes); ++ ++ buff_reverse(data, (u8 *)ctx->pub_key.y, nbytes); ++ memcpy_toio(base + ASPEED_ECC_PAR_QY_REG, data, nbytes); ++ ++ ctx->pub_key_set = ret == 0; ++ ++ vfree(data); ++ ++ return ret; ++} ++ ++static void aspeed_ecdsa_exit_tfm(struct crypto_akcipher *tfm) ++{ ++ struct aspeed_ecc_ctx *ctx = akcipher_tfm_ctx(tfm); ++ ++ AST_DBG(ctx->ecdsa_dev, "\n"); ++ ++ aspeed_ecdsa_ecc_ctx_deinit(ctx); ++ ++ crypto_free_akcipher(ctx->fallback_tfm); ++} ++ ++static unsigned int aspeed_ecdsa_max_size(struct crypto_akcipher *tfm) ++{ ++ struct aspeed_ecc_ctx *ctx = akcipher_tfm_ctx(tfm); ++ ++ return ctx->pub_key.ndigits << ECC_DIGITS_TO_BYTES_SHIFT; ++} ++ ++static int aspeed_ecdsa_nist_p384_init_tfm(struct crypto_akcipher *tfm) ++{ ++ struct aspeed_ecc_ctx *ctx = akcipher_tfm_ctx(tfm); ++ struct akcipher_alg *alg = crypto_akcipher_alg(tfm); ++ const char *name = crypto_tfm_alg_name(&tfm->base); ++ struct aspeed_ecdsa_alg *ecdsa_alg; ++ ++ ecdsa_alg = container_of(alg, struct aspeed_ecdsa_alg, akcipher.base); ++ ++ ctx->ecdsa_dev = ecdsa_alg->ecdsa_dev; ++ ++ AST_DBG(ctx->ecdsa_dev, "\n"); ++ ctx->fallback_tfm = crypto_alloc_akcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); ++ if (IS_ERR(ctx->fallback_tfm)) { ++ dev_err(ctx->ecdsa_dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n", ++ name, PTR_ERR(ctx->fallback_tfm)); ++ return PTR_ERR(ctx->fallback_tfm); ++ } ++ ++ return aspeed_ecdsa_ecc_ctx_init(ctx, ECC_CURVE_NIST_P384); ++} ++ ++static int aspeed_ecdsa_nist_p256_init_tfm(struct crypto_akcipher *tfm) ++{ ++ struct aspeed_ecc_ctx *ctx = akcipher_tfm_ctx(tfm); ++ struct akcipher_alg *alg = crypto_akcipher_alg(tfm); ++ const char *name = crypto_tfm_alg_name(&tfm->base); ++ struct aspeed_ecdsa_alg *ecdsa_alg; ++ ++ ecdsa_alg = container_of(alg, struct aspeed_ecdsa_alg, akcipher.base); ++ ++ ctx->ecdsa_dev = ecdsa_alg->ecdsa_dev; ++ ++ AST_DBG(ctx->ecdsa_dev, "\n"); ++ ctx->fallback_tfm = crypto_alloc_akcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); ++ if (IS_ERR(ctx->fallback_tfm)) { ++ dev_err(ctx->ecdsa_dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n", ++ name, PTR_ERR(ctx->fallback_tfm)); ++ 
return PTR_ERR(ctx->fallback_tfm); ++ } ++ ++ return aspeed_ecdsa_ecc_ctx_init(ctx, ECC_CURVE_NIST_P256); ++} ++ ++static int aspeed_ecdsa_complete(struct aspeed_ecdsa_dev *ecdsa_dev) ++{ ++ struct aspeed_engine_ecdsa *ecdsa_engine = &ecdsa_dev->ecdsa_engine; ++ struct akcipher_request *req = ecdsa_engine->req; ++ int results = ecdsa_engine->results; ++ ++ AST_DBG(ecdsa_dev, "\n"); ++ ++ ecdsa_engine->flags &= ~CRYPTO_FLAGS_BUSY; ++ ++ crypto_finalize_akcipher_request(ecdsa_dev->crypt_engine_ecdsa, req, results); ++ ++ return results; ++} ++ ++static int aspeed_ecdsa_do_request(struct crypto_engine *engine, void *areq) ++{ ++ struct akcipher_request *req = akcipher_request_cast(areq); ++ struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req); ++ struct aspeed_ecc_ctx *ctx = akcipher_tfm_ctx(cipher); ++ struct aspeed_ecdsa_dev *ecdsa_dev = ctx->ecdsa_dev; ++ struct aspeed_engine_ecdsa *ecdsa_engine; ++ ++ AST_DBG(ctx->ecdsa_dev, "\n"); ++ ++ ecdsa_engine = &ecdsa_dev->ecdsa_engine; ++ ecdsa_engine->req = req; ++ ecdsa_engine->flags |= CRYPTO_FLAGS_BUSY; ++ ecdsa_engine->resume = aspeed_ecdsa_complete; ++ ++ return ctx->trigger(ecdsa_dev); ++} ++ ++static void aspeed_ecdsa_done_task(unsigned long data) ++{ ++ struct aspeed_ecdsa_dev *ecdsa_dev = (struct aspeed_ecdsa_dev *)data; ++ struct aspeed_engine_ecdsa *ecdsa_engine = &ecdsa_dev->ecdsa_engine; ++ u32 ctrl; ++ ++ AST_DBG(ecdsa_dev, "\n"); ++ ++ /* Reset engine */ ++ ctrl = ast_read(ecdsa_dev, ASPEED_ECC_CTRL_REG); ++ ast_write(ecdsa_dev, 0, ASPEED_ECC_CTRL_REG); ++ ++ /* Memory barrier to ensure ecc ctrl is reset. */ ++ mb(); ++ ast_write(ecdsa_dev, ctrl, ASPEED_ECC_CTRL_REG); ++ ++ (void)ecdsa_engine->resume(ecdsa_dev); ++} ++ ++static struct aspeed_ecdsa_alg aspeed_ecdsa_nist_p256 = { ++ .akcipher.base = { ++ .verify = aspeed_ecdsa_verify, ++ .set_pub_key = aspeed_ecdsa_set_pub_key, ++ .max_size = aspeed_ecdsa_max_size, ++ .init = aspeed_ecdsa_nist_p256_init_tfm, ++ .exit = aspeed_ecdsa_exit_tfm, ++ .base = { ++ .cra_name = "ecdsa-nist-p256", ++ .cra_driver_name = "aspeed-ecdsa-nist-p256", ++ .cra_priority = 300, ++ .cra_module = THIS_MODULE, ++ .cra_ctxsize = sizeof(struct aspeed_ecc_ctx), ++ .cra_flags = CRYPTO_ALG_TYPE_AKCIPHER | ++ CRYPTO_ALG_KERN_DRIVER_ONLY | ++ CRYPTO_ALG_NEED_FALLBACK, ++ }, ++ }, ++ .akcipher.op = { ++ .do_one_request = aspeed_ecdsa_do_request, ++ }, ++}; ++ ++static struct aspeed_ecdsa_alg aspeed_ecdsa_nist_p384 = { ++ .akcipher.base = { ++ .verify = aspeed_ecdsa_verify, ++ .set_pub_key = aspeed_ecdsa_set_pub_key, ++ .max_size = aspeed_ecdsa_max_size, ++ .init = aspeed_ecdsa_nist_p384_init_tfm, ++ .exit = aspeed_ecdsa_exit_tfm, ++ .base = { ++ .cra_name = "ecdsa-nist-p384", ++ .cra_driver_name = "aspeed-ecdsa-nist-p384", ++ .cra_priority = 300, ++ .cra_module = THIS_MODULE, ++ .cra_ctxsize = sizeof(struct aspeed_ecc_ctx), ++ .cra_flags = CRYPTO_ALG_TYPE_AKCIPHER | ++ CRYPTO_ALG_KERN_DRIVER_ONLY | ++ CRYPTO_ALG_NEED_FALLBACK, ++ }, ++ }, ++ .akcipher.op = { ++ .do_one_request = aspeed_ecdsa_do_request, ++ }, ++}; ++ ++static int aspeed_ecdsa_register(struct aspeed_ecdsa_dev *ecdsa_dev) ++{ ++ int rc; ++ ++ aspeed_ecdsa_nist_p256.ecdsa_dev = ecdsa_dev; ++ rc = crypto_engine_register_akcipher(&aspeed_ecdsa_nist_p256.akcipher); ++ if (rc) ++ goto nist_p256_error; ++ ++ aspeed_ecdsa_nist_p384.ecdsa_dev = ecdsa_dev; ++ rc = crypto_engine_register_akcipher(&aspeed_ecdsa_nist_p384.akcipher); ++ if (rc) ++ goto nist_p384_error; ++ ++ return 0; ++ ++nist_p384_error: ++ 
crypto_engine_unregister_akcipher(&aspeed_ecdsa_nist_p256.akcipher); ++ ++nist_p256_error: ++ return rc; ++} ++ ++static void aspeed_ecdsa_unregister(struct aspeed_ecdsa_dev *ecdsa_dev) ++{ ++ crypto_engine_unregister_akcipher(&aspeed_ecdsa_nist_p256.akcipher); ++ crypto_engine_unregister_akcipher(&aspeed_ecdsa_nist_p384.akcipher); ++} ++ ++#ifdef ASPEED_ECDSA_IRQ_MODE ++/* ecdsa interrupt service routine. */ ++static irqreturn_t aspeed_ecdsa_irq(int irq, void *dev) ++{ ++ struct aspeed_ecdsa_dev *ecdsa_dev = (struct aspeed_ecdsa_dev *)dev; ++ struct aspeed_engine_ecdsa *ecdsa_engine = &ecdsa_dev->ecdsa_engine; ++ u32 sts; ++ ++ sts = ast_read(ecdsa_dev, ASPEED_ECC_INT_STS); ++ ast_write(ecdsa_dev, sts, ASPEED_ECC_INT_STS); ++ ++ AST_DBG(ecdsa_dev, "irq sts:0x%x\n", sts); ++ ++ sts = ast_read(ecdsa_dev, ASPEED_ECC_STS_REG) & ECC_VERIFY_PASS; ++ if (sts == ECC_VERIFY_PASS) { ++ AST_DBG(ecdsa_dev, "Verify PASS !\n"); ++ ++ ecdsa_engine->results = 0; ++ /* Stop ECDSA engine */ ++ if (ecdsa_engine->flags & CRYPTO_FLAGS_BUSY) ++ tasklet_schedule(&ecdsa_engine->done_task); ++ else ++ dev_err(ecdsa_dev->dev, "ECDSA no active requests.\n"); ++ ++ } else { ++ ecdsa_engine->results = -EKEYREJECTED; ++ AST_DBG(ecdsa_dev, "Verify FAILED !\n"); ++ } ++ ++ return IRQ_HANDLED; ++} ++#endif ++ ++static const struct of_device_id aspeed_ecdsa_of_matches[] = { ++ { .compatible = "aspeed,ast2700-ecdsa", }, ++ {}, ++}; ++ ++static int aspeed_ecdsa_probe(struct platform_device *pdev) ++{ ++ struct aspeed_engine_ecdsa *ecdsa_engine; ++ struct aspeed_ecdsa_dev *ecdsa_dev; ++ struct device *dev = &pdev->dev; ++ int rc; ++ ++ ecdsa_dev = devm_kzalloc(dev, sizeof(struct aspeed_ecdsa_dev), ++ GFP_KERNEL); ++ if (!ecdsa_dev) ++ return -ENOMEM; ++ ++ ecdsa_dev->dev = dev; ++ ++ platform_set_drvdata(pdev, ecdsa_dev); ++ ++ ecdsa_dev->regs = devm_platform_ioremap_resource(pdev, 0); ++ if (IS_ERR(ecdsa_dev->regs)) ++ return PTR_ERR(ecdsa_dev->regs); ++ ++#ifdef ASPEED_ECDSA_IRQ_MODE ++ /* Get irq number and register it */ ++ ecdsa_dev->irq = platform_get_irq(pdev, 0); ++ if (ecdsa_dev->irq < 0) ++ return -ENXIO; ++ ++ rc = devm_request_irq(dev, ecdsa_dev->irq, aspeed_ecdsa_irq, 0, ++ dev_name(dev), ecdsa_dev); ++ if (rc) { ++ dev_err(dev, "Failed to request irq.\n"); ++ return rc; ++ } ++ ++ /* Enable interrupt */ ++ ast_write(ecdsa_dev, 0x1, ASPEED_ECC_INT_EN); ++#endif ++ ++ rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); ++ if (rc) { ++ dev_warn(&pdev->dev, "No suitable DMA available\n"); ++ return rc; ++ } ++ ++ ecdsa_dev->clk = devm_clk_get_enabled(dev, NULL); ++ if (IS_ERR(ecdsa_dev->clk)) { ++ dev_err(dev, "Failed to get ecdsa clk\n"); ++ return PTR_ERR(ecdsa_dev->clk); ++ } ++ ++ ecdsa_dev->rst = devm_reset_control_get_shared(dev, NULL); ++ if (IS_ERR(ecdsa_dev->rst)) { ++ dev_err(dev, "Failed to get ecdsa reset\n"); ++ return PTR_ERR(ecdsa_dev->rst); ++ } ++ ++ rc = reset_control_deassert(ecdsa_dev->rst); ++ if (rc) { ++ dev_err(dev, "Deassert ecdsa reset failed\n"); ++ return rc; ++ } ++ ++ ecdsa_engine = &ecdsa_dev->ecdsa_engine; ++ ++ /* Initialize crypto hardware engine structure for ECDSA */ ++ ecdsa_dev->crypt_engine_ecdsa = crypto_engine_alloc_init(ecdsa_dev->dev, true); ++ if (!ecdsa_dev->crypt_engine_ecdsa) { ++ rc = -ENOMEM; ++ goto end; ++ } ++ ++ rc = crypto_engine_start(ecdsa_dev->crypt_engine_ecdsa); ++ if (rc) ++ goto err_engine_ecdsa_start; ++ ++ tasklet_init(&ecdsa_engine->done_task, aspeed_ecdsa_done_task, ++ (unsigned long)ecdsa_dev); ++ ++ /* Self-test */ ++ rc = 
aspeed_ecdsa_self_test(ecdsa_dev); ++ if (rc) ++ goto err_engine_ecdsa_start; ++ ++ rc = aspeed_ecdsa_register(ecdsa_dev); ++ if (rc) { ++ dev_err(dev, "ECDSA algo register failed\n"); ++ return rc; ++ } ++ ++ mutex_init(&ecdsa_dev->lock); ++ ++ dev_info(dev, "Aspeed ECDSA Hardware Accelerator successfully registered\n"); ++ ++ return 0; ++ ++err_engine_ecdsa_start: ++ crypto_engine_exit(ecdsa_dev->crypt_engine_ecdsa); ++end: ++ return rc; ++} ++ ++static void aspeed_ecdsa_remove(struct platform_device *pdev) ++{ ++ struct aspeed_ecdsa_dev *ecdsa_dev = platform_get_drvdata(pdev); ++ ++ aspeed_ecdsa_unregister(ecdsa_dev); ++} ++ ++MODULE_DEVICE_TABLE(of, aspeed_ecdsa_of_matches); ++ ++static struct platform_driver aspeed_ecdsa_driver = { ++ .probe = aspeed_ecdsa_probe, ++ .remove = aspeed_ecdsa_remove, ++ .driver = { ++ .name = KBUILD_MODNAME, ++ .of_match_table = aspeed_ecdsa_of_matches, ++ }, ++}; ++ ++module_platform_driver(aspeed_ecdsa_driver); ++MODULE_AUTHOR("Neal Liu "); ++MODULE_DESCRIPTION("ASPEED ECDSA algorithm driver acceleration"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/crypto/aspeed/aspeed-ecdsa.h b/drivers/crypto/aspeed/aspeed-ecdsa.h +--- a/drivers/crypto/aspeed/aspeed-ecdsa.h 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/crypto/aspeed/aspeed-ecdsa.h 2025-12-23 10:16:21.140032401 +0000 +@@ -0,0 +1,119 @@ ++/* SPDX-License-Identifier: GPL-2.0+ */ ++ ++#ifndef __ASPEED_ECDSA_H__ ++#define __ASPEED_ECDSA_H__ ++ ++#ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG ++#define AST_DBG(d, fmt, ...) \ ++ dev_info((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__) ++#else ++#define AST_DBG(d, fmt, ...) \ ++ dev_dbg((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__) ++#endif ++ ++/************************* ++ * * ++ * ECDSA regs definition * ++ * * ++ *************************/ ++#define ASPEED_ECC_STS_REG 0xb0 ++#define ASPEED_ECC_CTRL_REG 0xb4 ++#define ASPEED_ECC_CMD_REG 0xbc ++#define ASPEED_ECC_INT_EN 0xc0 ++#define ASPEED_ECC_INT_STS 0xc4 ++ ++#define ASPEED_ECC_DATA_BASE 0x800 ++#define ASPEED_ECC_PAR_GX_REG 0x800 ++#define ASPEED_ECC_PAR_GY_REG 0x840 ++#define ASPEED_ECC_PAR_QX_REG 0x880 ++#define ASPEED_ECC_PAR_QY_REG 0x8c0 ++#define ASPEED_ECC_PAR_P_REG 0x900 ++#define ASPEED_ECC_PAR_A_REG 0x940 ++#define ASPEED_ECC_PAR_N_REG 0x980 ++#define ASPEED_ECC_SIGN_R_REG 0x9c0 ++#define ASPEED_ECC_SIGN_S_REG 0xa00 ++#define ASPEED_ECC_MESSAGE_REG 0xa40 ++#define ASPEED_ECC_ECDSA_VERIFY 0xbc0 ++ ++/* sts */ ++#define ECC_IDLE BIT(0) ++#define ECC_VERIFY_PASS BIT(1) ++ ++/* ctrl/cmd */ ++#define ECC_EN BIT(0) ++#define ECDSA_384_EN 0x0 ++#define ECDSA_256_EN BIT(1) ++#define ADDR_BE BIT(2) ++#define DATA_BE BIT(3) ++ ++#define PAR_LEN_256 32 ++#define PAR_LEN_384 48 ++ ++#define ASPEED_ECC_POLLING_TIME 100 ++#define ASPEED_ECC_TIMEOUT 100000 /* 100 ms */ ++ ++#define CRYPTO_FLAGS_BUSY BIT(1) ++ ++#define ast_write(ast, val, offset) \ ++ writel((val), (ast)->regs + (offset)) ++ ++#define ast_read(ast, offset) \ ++ readl((ast)->regs + (offset)) ++ ++struct aspeed_ecdsa_dev; ++ ++typedef int (*aspeed_ecdsa_fn_t)(struct aspeed_ecdsa_dev *); ++ ++struct aspeed_ecc_ctx { ++ struct aspeed_ecdsa_dev *ecdsa_dev; ++ unsigned int curve_id; ++ const struct ecc_curve *curve; ++ ++ bool pub_key_set; ++ u64 x[ECC_MAX_DIGITS]; /* pub key x and y coordinates */ ++ u64 y[ECC_MAX_DIGITS]; ++ struct ecc_point pub_key; ++ ++ struct crypto_akcipher *fallback_tfm; ++ ++ aspeed_ecdsa_fn_t trigger; ++}; ++ ++struct ecdsa_signature_ctx { ++ const struct ecc_curve *curve; ++ u64 r[ECC_MAX_DIGITS]; ++ u64 
s[ECC_MAX_DIGITS]; ++}; ++ ++struct aspeed_engine_ecdsa { ++ struct tasklet_struct done_task; ++ unsigned long flags; ++ struct akcipher_request *req; ++ int results; ++ ++ /* callback func */ ++ aspeed_ecdsa_fn_t resume; ++}; ++ ++struct aspeed_ecdsa_alg { ++ struct aspeed_ecdsa_dev *ecdsa_dev; ++ struct akcipher_engine_alg akcipher; ++}; ++ ++struct aspeed_ecdsa_dev { ++ void __iomem *regs; ++ struct device *dev; ++ struct clk *clk; ++ struct reset_control *rst; ++ int irq; ++ ++ /* Support ecdsa256/384 execution concurrent */ ++ struct mutex lock; ++ ++ struct crypto_engine *crypt_engine_ecdsa; ++ struct aspeed_engine_ecdsa ecdsa_engine; ++}; ++ ++extern const struct asn1_decoder ecdsasignature_decoder; ++ ++#endif +diff --git a/drivers/crypto/aspeed/aspeed-hace-crypto.c b/drivers/crypto/aspeed/aspeed-hace-crypto.c +--- a/drivers/crypto/aspeed/aspeed-hace-crypto.c 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/crypto/aspeed/aspeed-hace-crypto.c 2025-12-23 10:16:21.140032401 +0000 +@@ -24,6 +24,11 @@ + dev_dbg((h)->dev, "%s() " fmt, __func__, ##__VA_ARGS__) + #endif + ++#define ASPEED_SEC_PROTECTION 0x0 ++#define SEC_UNLOCK_PASSWORD 0x349fe38a ++#define ASPEED_VAULT_KEY_CTRL 0x80C ++#define SEC_VK_CTRL_VK_SELECTION BIT(0) ++ + static int aspeed_crypto_do_fallback(struct skcipher_request *areq) + { + struct aspeed_cipher_reqctx *rctx = skcipher_request_ctx(areq); +@@ -143,6 +148,7 @@ + dma_unmap_sg(dev, req->src, rctx->src_nents, DMA_TO_DEVICE); + dma_unmap_sg(dev, req->dst, rctx->dst_nents, DMA_FROM_DEVICE); + } ++ up(&hace_dev->lock); + + return aspeed_sk_complete(hace_dev, 0); + } +@@ -206,10 +212,21 @@ + crypto_engine->resume = aspeed_sk_transfer; + + /* Trigger engines */ ++ ast_hace_write(hace_dev, crypto_engine->cipher_ctx_dma, ++ ASPEED_HACE_CONTEXT); + ast_hace_write(hace_dev, crypto_engine->cipher_dma_addr, + ASPEED_HACE_SRC); + ast_hace_write(hace_dev, crypto_engine->cipher_dma_addr, + ASPEED_HACE_DEST); ++#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT ++ ast_hace_write(hace_dev, crypto_engine->cipher_ctx_dma >> 32, ++ ASPEED_HACE_CONTEXT_H); ++ ast_hace_write(hace_dev, crypto_engine->cipher_dma_addr >> 32, ++ ASPEED_HACE_SRC_H); ++ ast_hace_write(hace_dev, crypto_engine->cipher_dma_addr >> 32, ++ ASPEED_HACE_DEST_H); ++#endif ++ + ast_hace_write(hace_dev, req->cryptlen, ASPEED_HACE_DATA_LEN); + ast_hace_write(hace_dev, rctx->enc_cmd, ASPEED_HACE_CMD); + +@@ -222,21 +239,65 @@ + struct aspeed_sg_list *src_list, *dst_list; + dma_addr_t src_dma_addr, dst_dma_addr; + struct aspeed_cipher_reqctx *rctx; ++ struct crypto_skcipher *cipher; ++ struct aspeed_cipher_ctx *ctx; + struct skcipher_request *req; + struct scatterlist *s; ++ int use_vault_key = 0; + int src_sg_len; + int dst_sg_len; + int total, i; + int rc; ++ u32 val; + + CIPHER_DBG(hace_dev, "\n"); + + req = crypto_engine->req; ++ cipher = crypto_skcipher_reqtfm(req); ++ ctx = crypto_skcipher_ctx(cipher); + rctx = skcipher_request_ctx(req); + + rctx->enc_cmd |= HACE_CMD_DES_SG_CTRL | HACE_CMD_SRC_SG_CTRL | + HACE_CMD_AES_KEY_HW_EXP | HACE_CMD_MBUS_REQ_SYNC_EN; + ++ if (crypto_engine->load_vault_key) { ++ writel(SEC_UNLOCK_PASSWORD, hace_dev->sec_regs + ASPEED_SEC_PROTECTION); ++ CIPHER_DBG(hace_dev, "unlock SB, SEC000=0x%x\n", readl(hace_dev->sec_regs + ASPEED_SEC_PROTECTION)); ++ val = readl(hace_dev->sec_regs + ASPEED_VAULT_KEY_CTRL); ++ if (val & BIT(2)) { ++ if (ctx->dummy_key == 1 && !(val & BIT(0))) { ++ use_vault_key = 1; ++ CIPHER_DBG(hace_dev, "Use Vault key 1\n"); ++ } else if (ctx->dummy_key == 2 && (val & BIT(0))) { ++ 
use_vault_key = 1; ++ CIPHER_DBG(hace_dev, "Use Vault key 2\n"); ++ } else { ++ use_vault_key = 0; ++ } ++ } else { ++ if (ctx->dummy_key == 1) { ++ use_vault_key = 1; ++ val &= ~SEC_VK_CTRL_VK_SELECTION; ++ writel(val, hace_dev->sec_regs + ASPEED_VAULT_KEY_CTRL); ++ CIPHER_DBG(hace_dev, "Set Vault key 1\n"); ++ } else if (ctx->dummy_key == 2) { ++ use_vault_key = 1; ++ val |= SEC_VK_CTRL_VK_SELECTION; ++ writel(val, hace_dev->sec_regs + ASPEED_VAULT_KEY_CTRL); ++ CIPHER_DBG(hace_dev, "Set Vault key 2\n"); ++ } else { ++ use_vault_key = 0; ++ } ++ } ++ writel(0x0, hace_dev->sec_regs + ASPEED_SEC_PROTECTION); ++ CIPHER_DBG(hace_dev, "lock SB, SEC000=0x%x\n", readl(hace_dev->sec_regs + ASPEED_SEC_PROTECTION)); ++ ++ if (use_vault_key) ++ rctx->enc_cmd |= HACE_CMD_AES_KEY_FROM_OTP; ++ else ++ rctx->enc_cmd &= ~HACE_CMD_AES_KEY_FROM_OTP; ++ } ++ + /* BIDIRECTIONAL */ + if (req->dst == req->src) { + src_sg_len = dma_map_sg(hace_dev->dev, req->src, +@@ -332,8 +393,19 @@ + mb(); + + /* Trigger engines */ ++ down(&hace_dev->lock); ++ ast_hace_write(hace_dev, crypto_engine->cipher_ctx_dma, ++ ASPEED_HACE_CONTEXT); + ast_hace_write(hace_dev, src_dma_addr, ASPEED_HACE_SRC); + ast_hace_write(hace_dev, dst_dma_addr, ASPEED_HACE_DEST); ++ ++#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT ++ ast_hace_write(hace_dev, crypto_engine->cipher_ctx_dma >> 32, ++ ASPEED_HACE_CONTEXT_H); ++ ast_hace_write(hace_dev, src_dma_addr >> 32, ASPEED_HACE_SRC_H); ++ ast_hace_write(hace_dev, dst_dma_addr >> 32, ASPEED_HACE_DEST_H); ++#endif ++ + ast_hace_write(hace_dev, req->cryptlen, ASPEED_HACE_DATA_LEN); + ast_hace_write(hace_dev, rctx->enc_cmd, ASPEED_HACE_CMD); + +@@ -346,7 +418,7 @@ + + } else { + dma_unmap_sg(hace_dev->dev, req->dst, rctx->dst_nents, +- DMA_TO_DEVICE); ++ DMA_FROM_DEVICE); + dma_unmap_sg(hace_dev->dev, req->src, rctx->src_nents, + DMA_TO_DEVICE); + } +@@ -380,9 +452,6 @@ + rctx->dst_nents = sg_nents(req->dst); + rctx->src_nents = sg_nents(req->src); + +- ast_hace_write(hace_dev, crypto_engine->cipher_ctx_dma, +- ASPEED_HACE_CONTEXT); +- + if (rctx->enc_cmd & HACE_CMD_IV_REQUIRE) { + if (rctx->enc_cmd & HACE_CMD_DES_SELECT) + memcpy(crypto_engine->cipher_ctx + DES_BLOCK_SIZE, +@@ -392,7 +461,8 @@ + AES_BLOCK_SIZE); + } + +- if (hace_dev->version == AST2600_VERSION) { ++ if (hace_dev->version == AST2600_VERSION || ++ hace_dev->version == AST2700_VERSION) { + memcpy(crypto_engine->cipher_ctx + 16, ctx->key, ctx->key_len); + + return aspeed_sk_start_sg(hace_dev); +@@ -580,6 +650,8 @@ + + CIPHER_DBG(hace_dev, "keylen: %d bits\n", (keylen * 8)); + ++ ctx->dummy_key = find_dummy_key(key, keylen); ++ + if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && + keylen != AES_KEYSIZE_256) + return -EINVAL; +@@ -919,19 +991,49 @@ + for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs); i++) + crypto_engine_unregister_skcipher(&aspeed_crypto_algs[i].alg.skcipher); + +- if (hace_dev->version != AST2600_VERSION) ++ if (hace_dev->version == AST2500_VERSION) + return; + + for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs_g6); i++) + crypto_engine_unregister_skcipher(&aspeed_crypto_algs_g6[i].alg.skcipher); + } + ++#ifdef CONFIG_AST2600_OTP ++static void find_vault_key(struct aspeed_hace_dev *hace_dev) ++{ ++ struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine; ++ u32 otp_data[16]; ++ int i; ++ ++ crypto_engine->load_vault_key = 0; ++ ++ otp_read_data_buf(0, otp_data, 16); ++ for (i = 0; i < 16; i++) { ++ CIPHER_DBG(hace_dev, "OTPDATA%d=%x\n", i, otp_data[i]); ++ if (((otp_data[i] >> 14) & 0xf) == 1) { ++ 
CIPHER_DBG(hace_dev, "Found vault key in OTP\n"); ++ crypto_engine->load_vault_key = 1; ++ return; ++ } ++ if (otp_data[i] & BIT(13)) ++ break; ++ } ++ CIPHER_DBG(hace_dev, "Not found vault key in OTP\n"); ++} ++#endif ++ + void aspeed_register_hace_crypto_algs(struct aspeed_hace_dev *hace_dev) + { + int rc, i; + + CIPHER_DBG(hace_dev, "\n"); + ++#ifdef CONFIG_AST2600_OTP ++ find_vault_key(hace_dev); ++#else ++ hace_dev->crypto_engine.load_vault_key = 0; ++#endif ++ + for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs); i++) { + aspeed_crypto_algs[i].hace_dev = hace_dev; + rc = crypto_engine_register_skcipher(&aspeed_crypto_algs[i].alg.skcipher); +@@ -941,7 +1043,7 @@ + } + } + +- if (hace_dev->version != AST2600_VERSION) ++ if (hace_dev->version == AST2500_VERSION) + return; + + for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs_g6); i++) { +@@ -953,3 +1055,88 @@ + } + } + } ++ ++static void aspeed_hace_crypto_done_task(unsigned long data) ++{ ++ struct aspeed_hace_dev *hace_dev = (struct aspeed_hace_dev *)data; ++ struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine; ++ ++ crypto_engine->resume(hace_dev); ++} ++ ++int aspeed_hace_crypto_init(struct aspeed_hace_dev *hace_dev) ++{ ++ struct aspeed_engine_crypto *crypto_engine; ++ int rc; ++ ++ crypto_engine = &hace_dev->crypto_engine; ++ ++ /* Initialize crypto hardware engine structure for crypto */ ++ hace_dev->crypt_engine_crypto = crypto_engine_alloc_init(hace_dev->dev, ++ true); ++ if (!hace_dev->crypt_engine_crypto) { ++ rc = -ENOMEM; ++ goto end; ++ } ++ ++ rc = crypto_engine_start(hace_dev->crypt_engine_crypto); ++ if (rc) ++ goto err_engine_crypto_start; ++ ++ tasklet_init(&crypto_engine->done_task, aspeed_hace_crypto_done_task, ++ (unsigned long)hace_dev); ++ ++ /* Allocate DMA buffer for crypto engine context used */ ++ crypto_engine->cipher_ctx = ++ dmam_alloc_coherent(hace_dev->dev, ++ PAGE_SIZE, ++ &crypto_engine->cipher_ctx_dma, ++ GFP_KERNEL); ++ if (!crypto_engine->cipher_ctx) { ++ dev_err(hace_dev->dev, "Failed to allocate cipher ctx dma\n"); ++ rc = -ENOMEM; ++ goto err_engine_crypto_start; ++ } ++ ++ /* Allocate DMA buffer for crypto engine input used */ ++ crypto_engine->cipher_addr = ++ dmam_alloc_coherent(hace_dev->dev, ++ ASPEED_CRYPTO_SRC_DMA_BUF_LEN, ++ &crypto_engine->cipher_dma_addr, ++ GFP_KERNEL); ++ if (!crypto_engine->cipher_addr) { ++ dev_err(hace_dev->dev, "Failed to allocate cipher addr dma\n"); ++ rc = -ENOMEM; ++ goto err_engine_crypto_start; ++ } ++ ++ /* Allocate DMA buffer for crypto engine output used */ ++ if (hace_dev->version == AST2600_VERSION || ++ hace_dev->version == AST2700_VERSION) { ++ crypto_engine->dst_sg_addr = ++ dmam_alloc_coherent(hace_dev->dev, ++ ASPEED_CRYPTO_DST_DMA_BUF_LEN, ++ &crypto_engine->dst_sg_dma_addr, ++ GFP_KERNEL); ++ if (!crypto_engine->dst_sg_addr) { ++ dev_err(hace_dev->dev, "Failed to allocate dst_sg dma\n"); ++ rc = -ENOMEM; ++ goto err_engine_crypto_start; ++ } ++ } ++ ++ return 0; ++ ++err_engine_crypto_start: ++ crypto_engine_exit(hace_dev->crypt_engine_crypto); ++end: ++ return rc; ++} ++ ++void aspeed_hace_crypto_remove(struct aspeed_hace_dev *hace_dev) ++{ ++ struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine; ++ ++ crypto_engine_exit(hace_dev->crypt_engine_crypto); ++ tasklet_kill(&crypto_engine->done_task); ++} +diff --git a/drivers/crypto/aspeed/aspeed-hace-hash.c b/drivers/crypto/aspeed/aspeed-hace-hash.c +--- a/drivers/crypto/aspeed/aspeed-hace-hash.c 2025-08-01 08:48:47.000000000 +0000 ++++ 
b/drivers/crypto/aspeed/aspeed-hace-hash.c 2025-12-23 10:16:21.140032401 +0000 +@@ -138,21 +138,13 @@ + return -EINVAL; + } + +- scatterwalk_map_and_copy(rctx->buffer, rctx->src_sg, +- rctx->offset, remain, 0); ++ scatterwalk_map_and_copy(rctx->buffer, rctx->src_sg, rctx->offset, ++ remain, 0); + + rctx->bufcnt = remain; +- rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest, +- SHA512_DIGEST_SIZE, +- DMA_BIDIRECTIONAL); +- if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) { +- dev_warn(hace_dev->dev, "dma_map() rctx digest error\n"); +- return -ENOMEM; +- } + + hash_engine->src_length = length - remain; + hash_engine->src_dma = hash_engine->ahash_src_dma_addr; +- hash_engine->digest_dma = rctx->digest_dma_addr; + + return 0; + } +@@ -187,30 +179,12 @@ + } + + src_list = (struct aspeed_sg_list *)hash_engine->ahash_src_addr; +- rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest, +- SHA512_DIGEST_SIZE, +- DMA_BIDIRECTIONAL); +- if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) { +- dev_warn(hace_dev->dev, "dma_map() rctx digest error\n"); +- rc = -ENOMEM; +- goto free_src_sg; +- } + + if (rctx->bufcnt != 0) { + u32 phy_addr; + u32 len; + +- rctx->buffer_dma_addr = dma_map_single(hace_dev->dev, +- rctx->buffer, +- rctx->block_size * 2, +- DMA_TO_DEVICE); +- if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) { +- dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n"); +- rc = -ENOMEM; +- goto free_rctx_digest; +- } +- +- phy_addr = rctx->buffer_dma_addr; ++ phy_addr = hash_engine->buffer_dma_addr; + len = rctx->bufcnt; + length -= len; + +@@ -244,23 +218,15 @@ + + if (length != 0) { + rc = -EINVAL; +- goto free_rctx_buffer; ++ goto free_src_sg; + } + + rctx->offset = rctx->total - remain; + hash_engine->src_length = rctx->total + rctx->bufcnt - remain; + hash_engine->src_dma = hash_engine->ahash_src_dma_addr; +- hash_engine->digest_dma = rctx->digest_dma_addr; + + return 0; + +-free_rctx_buffer: +- if (rctx->bufcnt != 0) +- dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr, +- rctx->block_size * 2, DMA_TO_DEVICE); +-free_rctx_digest: +- dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr, +- SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL); + free_src_sg: + dma_unmap_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents, + DMA_TO_DEVICE); +@@ -294,13 +260,15 @@ + + AHASH_DBG(hace_dev, "\n"); + +- dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr, +- SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL); +- +- dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr, +- rctx->block_size * 2, DMA_TO_DEVICE); ++ memcpy(req->result, hash_engine->digest_addr, rctx->digsize); + +- memcpy(req->result, rctx->digest, rctx->digsize); ++ /* ++ * Workaround for aes engine hang: The sha configuration may cause aes ++ * engine to hang. To address this issue, the hace engine is reset after ++ * the hash calculation is completed. 
++ */ ++ aspeed_hace_reset(hace_dev); ++ up(&hace_dev->lock); + + return aspeed_ahash_complete(hace_dev); + } +@@ -315,13 +283,18 @@ + struct ahash_request *req = hash_engine->req; + struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); + ++ memcpy(hash_engine->digest_addr, rctx->digest, rctx->ivsize); ++ memcpy(hash_engine->buffer_addr, rctx->buffer, rctx->bufcnt); ++ hash_engine->digest_dma = hash_engine->digest_dma_addr; ++ + AHASH_DBG(hace_dev, "src_dma:%pad, digest_dma:%pad, length:%zu\n", + &hash_engine->src_dma, &hash_engine->digest_dma, + hash_engine->src_length); + +- rctx->cmd |= HASH_CMD_INT_ENABLE; ++ rctx->cmd |= HASH_CMD_INT_ENABLE | HASH_CMD_MBUS_REQ_SYNC_EN; + hash_engine->resume = resume; + ++ /* Trigger engines */ + ast_hace_write(hace_dev, hash_engine->src_dma, ASPEED_HACE_HASH_SRC); + ast_hace_write(hace_dev, hash_engine->digest_dma, + ASPEED_HACE_HASH_DIGEST_BUFF); +@@ -329,6 +302,14 @@ + ASPEED_HACE_HASH_KEY_BUFF); + ast_hace_write(hace_dev, hash_engine->src_length, + ASPEED_HACE_HASH_DATA_LEN); ++#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT ++ ast_hace_write(hace_dev, hash_engine->src_dma >> 32, ++ ASPEED_HACE_HASH_SRC_H); ++ ast_hace_write(hace_dev, hash_engine->digest_dma >> 32, ++ ASPEED_HACE_HASH_DIGEST_BUFF_H); ++ ast_hace_write(hace_dev, hash_engine->digest_dma >> 32, ++ ASPEED_HACE_HASH_KEY_BUFF_H); ++#endif + + /* Memory barrier to ensure all data setup before engine starts */ + mb(); +@@ -351,15 +332,10 @@ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm); + struct aspeed_sha_hmac_ctx *bctx = tctx->base; +- int rc = 0; + + AHASH_DBG(hace_dev, "\n"); + +- dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr, +- SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL); +- +- dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr, +- rctx->block_size * 2, DMA_TO_DEVICE); ++ memcpy(rctx->digest, hash_engine->digest_addr, rctx->ivsize); + + /* o key pad + hash sum 1 */ + memcpy(rctx->buffer, bctx->opad, rctx->block_size); +@@ -371,35 +347,10 @@ + aspeed_ahash_fill_padding(hace_dev, rctx); + memcpy(rctx->digest, rctx->sha_iv, rctx->ivsize); + +- rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest, +- SHA512_DIGEST_SIZE, +- DMA_BIDIRECTIONAL); +- if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) { +- dev_warn(hace_dev->dev, "dma_map() rctx digest error\n"); +- rc = -ENOMEM; +- goto end; +- } +- +- rctx->buffer_dma_addr = dma_map_single(hace_dev->dev, rctx->buffer, +- rctx->block_size * 2, +- DMA_TO_DEVICE); +- if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) { +- dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n"); +- rc = -ENOMEM; +- goto free_rctx_digest; +- } +- +- hash_engine->src_dma = rctx->buffer_dma_addr; ++ hash_engine->src_dma = hash_engine->buffer_dma_addr; + hash_engine->src_length = rctx->bufcnt; +- hash_engine->digest_dma = rctx->digest_dma_addr; + + return aspeed_hace_ahash_trigger(hace_dev, aspeed_ahash_transfer); +- +-free_rctx_digest: +- dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr, +- SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL); +-end: +- return rc; + } + + static int aspeed_ahash_req_final(struct aspeed_hace_dev *hace_dev) +@@ -407,47 +358,19 @@ + struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine; + struct ahash_request *req = hash_engine->req; + struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); +- int rc = 0; + + AHASH_DBG(hace_dev, "\n"); + + aspeed_ahash_fill_padding(hace_dev, rctx); + +- rctx->digest_dma_addr = 
dma_map_single(hace_dev->dev, +- rctx->digest, +- SHA512_DIGEST_SIZE, +- DMA_BIDIRECTIONAL); +- if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) { +- dev_warn(hace_dev->dev, "dma_map() rctx digest error\n"); +- rc = -ENOMEM; +- goto end; +- } +- +- rctx->buffer_dma_addr = dma_map_single(hace_dev->dev, +- rctx->buffer, +- rctx->block_size * 2, +- DMA_TO_DEVICE); +- if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) { +- dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n"); +- rc = -ENOMEM; +- goto free_rctx_digest; +- } +- +- hash_engine->src_dma = rctx->buffer_dma_addr; ++ hash_engine->src_dma = hash_engine->buffer_dma_addr; + hash_engine->src_length = rctx->bufcnt; +- hash_engine->digest_dma = rctx->digest_dma_addr; + + if (rctx->flags & SHA_FLAGS_HMAC) + return aspeed_hace_ahash_trigger(hace_dev, + aspeed_ahash_hmac_resume); + + return aspeed_hace_ahash_trigger(hace_dev, aspeed_ahash_transfer); +- +-free_rctx_digest: +- dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr, +- SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL); +-end: +- return rc; + } + + static int aspeed_ahash_update_resume_sg(struct aspeed_hace_dev *hace_dev) +@@ -458,17 +381,11 @@ + + AHASH_DBG(hace_dev, "\n"); + ++ memcpy(rctx->digest, hash_engine->digest_addr, rctx->ivsize); ++ + dma_unmap_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents, + DMA_TO_DEVICE); + +- if (rctx->bufcnt != 0) +- dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr, +- rctx->block_size * 2, +- DMA_TO_DEVICE); +- +- dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr, +- SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL); +- + scatterwalk_map_and_copy(rctx->buffer, rctx->src_sg, rctx->offset, + rctx->total - rctx->offset, 0); + +@@ -489,8 +406,7 @@ + + AHASH_DBG(hace_dev, "\n"); + +- dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr, +- SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL); ++ memcpy(rctx->digest, hash_engine->digest_addr, rctx->ivsize); + + if (rctx->flags & SHA_FLAGS_FINUP) + return aspeed_ahash_req_final(hace_dev); +@@ -508,12 +424,12 @@ + + AHASH_DBG(hace_dev, "\n"); + +- if (hace_dev->version == AST2600_VERSION) { +- rctx->cmd |= HASH_CMD_HASH_SRC_SG_CTRL; +- resume = aspeed_ahash_update_resume_sg; ++ if (hace_dev->version == AST2500_VERSION) { ++ resume = aspeed_ahash_update_resume; + + } else { +- resume = aspeed_ahash_update_resume; ++ rctx->cmd |= HASH_CMD_HASH_SRC_SG_CTRL; ++ resume = aspeed_ahash_update_resume_sg; + } + + ret = hash_engine->dma_prepare(hace_dev); +@@ -526,8 +442,32 @@ + static int aspeed_hace_hash_handle_queue(struct aspeed_hace_dev *hace_dev, + struct ahash_request *req) + { +- return crypto_transfer_hash_request_to_engine( +- hace_dev->crypt_engine_hash, req); ++ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); ++ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine; ++ int ret = 0; ++ static bool init_req; ++ ++ /* The first request is enqueued, lock the queue */ ++ if (rctx->op & SHA_OP_INIT) { ++ mutex_lock(&hash_engine->queue_lock); ++ init_req = true; ++ return 0; ++ } ++ ++ /* The previous request is init request, enqueue the request with init flag */ ++ if (init_req) { ++ rctx->op |= SHA_OP_INIT; ++ init_req = false; ++ } ++ ++ ret = crypto_transfer_hash_request_to_engine(hace_dev->crypt_engine_hash, ++ req); ++ ++ /* The last request is enqueued, release the lock */ ++ if (rctx->op & SHA_OP_FINAL || rctx->flags & SHA_FLAGS_FINUP) ++ mutex_unlock(&hash_engine->queue_lock); ++ ++ return ret; + } + + static int aspeed_ahash_do_request(struct crypto_engine *engine, void *areq) 
+@@ -543,9 +483,14 @@ + hash_engine = &hace_dev->hash_engine; + hash_engine->flags |= CRYPTO_FLAGS_BUSY; + +- if (rctx->op == SHA_OP_UPDATE) ++ /* If the update/final is the first request, lock hace engine */ ++ if (rctx->op & SHA_OP_INIT) ++ down(&hace_dev->lock); ++ ++ /* Do the update/final operation no matter what */ ++ if (rctx->op & SHA_OP_UPDATE) + ret = aspeed_ahash_req_update(hace_dev); +- else if (rctx->op == SHA_OP_FINAL) ++ else if (rctx->op & SHA_OP_FINAL) + ret = aspeed_ahash_req_final(hace_dev); + + if (ret != -EINPROGRESS) +@@ -566,10 +511,10 @@ + hash_engine = &hace_dev->hash_engine; + hash_engine->req = req; + +- if (hace_dev->version == AST2600_VERSION) +- hash_engine->dma_prepare = aspeed_ahash_dma_prepare_sg; +- else ++ if (hace_dev->version == AST2500_VERSION) + hash_engine->dma_prepare = aspeed_ahash_dma_prepare; ++ else ++ hash_engine->dma_prepare = aspeed_ahash_dma_prepare_sg; + } + + static int aspeed_ahash_do_one(struct crypto_engine *engine, void *areq) +@@ -671,6 +616,7 @@ + crypto_ahash_digestsize(tfm)); + + rctx->cmd = HASH_CMD_ACC_MODE; ++ rctx->op = SHA_OP_INIT; + rctx->flags = 0; + + switch (crypto_ahash_digestsize(tfm)) { +@@ -740,7 +686,7 @@ + rctx->flags |= SHA_FLAGS_HMAC; + } + +- return 0; ++ return aspeed_hace_hash_handle_queue(hace_dev, req); + } + + static int aspeed_sham_digest(struct ahash_request *req) +@@ -1196,7 +1142,7 @@ + for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs); i++) + crypto_engine_unregister_ahash(&aspeed_ahash_algs[i].alg.ahash); + +- if (hace_dev->version != AST2600_VERSION) ++ if (hace_dev->version == AST2500_VERSION) + return; + + for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs_g6); i++) +@@ -1218,7 +1164,7 @@ + } + } + +- if (hace_dev->version != AST2600_VERSION) ++ if (hace_dev->version == AST2500_VERSION) + return; + + for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs_g6); i++) { +@@ -1230,3 +1176,82 @@ + } + } + } ++ ++static void aspeed_hace_hash_done_task(unsigned long data) ++{ ++ struct aspeed_hace_dev *hace_dev = (struct aspeed_hace_dev *)data; ++ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine; ++ ++ hash_engine->resume(hace_dev); ++} ++ ++int aspeed_hace_hash_init(struct aspeed_hace_dev *hace_dev) ++{ ++ struct aspeed_engine_hash *hash_engine; ++ int rc; ++ ++ hash_engine = &hace_dev->hash_engine; ++ ++ /* Initialize crypto hardware engine structure for hash */ ++ hace_dev->crypt_engine_hash = crypto_engine_alloc_init(hace_dev->dev, ++ true); ++ if (!hace_dev->crypt_engine_hash) { ++ rc = -ENOMEM; ++ goto end; ++ } ++ ++ rc = crypto_engine_start(hace_dev->crypt_engine_hash); ++ if (rc) ++ goto err_engine_hash_start; ++ ++ tasklet_init(&hash_engine->done_task, aspeed_hace_hash_done_task, ++ (unsigned long)hace_dev); ++ ++ /* Allocate DMA buffer for hash engine input used */ ++ hash_engine->ahash_src_addr = ++ dmam_alloc_coherent(hace_dev->dev, ++ ASPEED_HASH_SRC_DMA_BUF_LEN, ++ &hash_engine->ahash_src_dma_addr, ++ GFP_KERNEL); ++ if (!hash_engine->ahash_src_addr) { ++ dev_err(hace_dev->dev, "Failed to allocate dma buffer\n"); ++ rc = -ENOMEM; ++ goto err_engine_hash_start; ++ } ++ ++ hash_engine->buffer_addr = dmam_alloc_coherent(hace_dev->dev, SHA512_BLOCK_SIZE * 2, ++ &hash_engine->buffer_dma_addr, ++ GFP_KERNEL); ++ if (!hash_engine->buffer_addr) { ++ dev_err(hace_dev->dev, "Failed to allocate DMA buffer\n"); ++ rc = -ENOMEM; ++ goto err_engine_hash_start; ++ } ++ ++ hash_engine->digest_addr = dmam_alloc_coherent(hace_dev->dev, SHA512_DIGEST_SIZE, ++ &hash_engine->digest_dma_addr, ++ GFP_KERNEL); ++ if 
(!hash_engine->digest_addr) { ++ dev_err(hace_dev->dev, "Failed to allocate DMA digest buffer\n"); ++ rc = -ENOMEM; ++ goto err_engine_hash_start; ++ } ++ ++ /* Hash engine hardware initial done, prepare queue lock */ ++ mutex_init(&hash_engine->queue_lock); ++ ++ return 0; ++ ++err_engine_hash_start: ++ crypto_engine_exit(hace_dev->crypt_engine_hash); ++end: ++ return rc; ++} ++ ++void aspeed_hace_hash_remove(struct aspeed_hace_dev *hace_dev) ++{ ++ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine; ++ ++ crypto_engine_exit(hace_dev->crypt_engine_hash); ++ tasklet_kill(&hash_engine->done_task); ++} +diff --git a/drivers/crypto/aspeed/aspeed-hace.c b/drivers/crypto/aspeed/aspeed-hace.c +--- a/drivers/crypto/aspeed/aspeed-hace.c 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/crypto/aspeed/aspeed-hace.c 2025-12-23 10:16:21.140032401 +0000 +@@ -6,15 +6,18 @@ + #include "aspeed-hace.h" + #include + #include ++#include + #include + #include + #include + #include + #include + #include ++#include ++#include ++#include + #include + #include +-#include + + #ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG + #define HACE_DBG(d, fmt, ...) \ +@@ -24,6 +27,45 @@ + dev_dbg((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__) + #endif + ++static unsigned char *dummy_key1; ++static unsigned char *dummy_key2; ++ ++int find_dummy_key(const char *key, int keylen) ++{ ++ int ret = 0; ++ ++ if (dummy_key1 && memcmp(key, dummy_key1, keylen) == 0) ++ ret = 1; ++ else if (dummy_key2 && memcmp(key, dummy_key2, keylen) == 0) ++ ret = 2; ++ ++ return ret; ++} ++ ++int aspeed_hace_reset(struct aspeed_hace_dev *hace_dev) ++{ ++ int rc; ++ ++ HACE_DBG(hace_dev, "\n"); ++ ++ if (!hace_dev->rst) ++ return -ENODEV; ++ ++ rc = reset_control_assert(hace_dev->rst); ++ if (rc) { ++ dev_err(hace_dev->dev, "Hace reset failed (assert).\n"); ++ return rc; ++ } ++ ++ rc = reset_control_deassert(hace_dev->rst); ++ if (rc) { ++ dev_err(hace_dev->dev, "Hace reset failed (deassert).\n"); ++ return rc; ++ } ++ ++ return 0; ++} ++ + /* HACE interrupt service routine */ + static irqreturn_t aspeed_hace_irq(int irq, void *dev) + { +@@ -51,23 +93,9 @@ + dev_warn(hace_dev->dev, "CRYPTO no active requests.\n"); + } + +- return IRQ_HANDLED; +-} +- +-static void aspeed_hace_crypto_done_task(unsigned long data) +-{ +- struct aspeed_hace_dev *hace_dev = (struct aspeed_hace_dev *)data; +- struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine; +- +- crypto_engine->resume(hace_dev); +-} ++ HACE_DBG(hace_dev, "handled\n"); + +-static void aspeed_hace_hash_done_task(unsigned long data) +-{ +- struct aspeed_hace_dev *hace_dev = (struct aspeed_hace_dev *)data; +- struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine; +- +- hash_engine->resume(hace_dev); ++ return IRQ_HANDLED; + } + + static void aspeed_hace_register(struct aspeed_hace_dev *hace_dev) +@@ -93,30 +121,35 @@ + static const struct of_device_id aspeed_hace_of_matches[] = { + { .compatible = "aspeed,ast2500-hace", .data = (void *)5, }, + { .compatible = "aspeed,ast2600-hace", .data = (void *)6, }, ++ { .compatible = "aspeed,ast2700-hace", .data = (void *)7, }, + {}, + }; + + static int aspeed_hace_probe(struct platform_device *pdev) + { +- struct aspeed_engine_crypto *crypto_engine; +- struct aspeed_engine_hash *hash_engine; ++ const struct of_device_id *hace_dev_id; + struct aspeed_hace_dev *hace_dev; ++ struct device *dev = &pdev->dev; + int rc; ++#ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO ++ struct device_node *sec_node; ++ int err; ++#endif + ++ /* Allocate and 
register hace driver in linux kernel */ + hace_dev = devm_kzalloc(&pdev->dev, sizeof(struct aspeed_hace_dev), + GFP_KERNEL); + if (!hace_dev) + return -ENOMEM; + +- hace_dev->version = (uintptr_t)device_get_match_data(&pdev->dev); +- if (!hace_dev->version) { ++ hace_dev_id = of_match_device(aspeed_hace_of_matches, &pdev->dev); ++ if (!hace_dev_id) { + dev_err(&pdev->dev, "Failed to match hace dev id\n"); + return -EINVAL; + } + + hace_dev->dev = &pdev->dev; +- hash_engine = &hace_dev->hash_engine; +- crypto_engine = &hace_dev->crypto_engine; ++ hace_dev->version = (unsigned long)hace_dev_id->data; + + platform_set_drvdata(pdev, hace_dev); + +@@ -149,115 +182,93 @@ + return rc; + } + +- /* Initialize crypto hardware engine structure for hash */ +- hace_dev->crypt_engine_hash = crypto_engine_alloc_init(hace_dev->dev, +- true); +- if (!hace_dev->crypt_engine_hash) { +- rc = -ENOMEM; +- goto clk_exit; +- } +- +- rc = crypto_engine_start(hace_dev->crypt_engine_hash); +- if (rc) +- goto err_engine_hash_start; +- +- tasklet_init(&hash_engine->done_task, aspeed_hace_hash_done_task, +- (unsigned long)hace_dev); +- +- /* Initialize crypto hardware engine structure for crypto */ +- hace_dev->crypt_engine_crypto = crypto_engine_alloc_init(hace_dev->dev, +- true); +- if (!hace_dev->crypt_engine_crypto) { +- rc = -ENOMEM; +- goto err_engine_hash_start; +- } +- +- rc = crypto_engine_start(hace_dev->crypt_engine_crypto); +- if (rc) +- goto err_engine_crypto_start; +- +- tasklet_init(&crypto_engine->done_task, aspeed_hace_crypto_done_task, +- (unsigned long)hace_dev); +- +- /* Allocate DMA buffer for hash engine input used */ +- hash_engine->ahash_src_addr = +- dmam_alloc_coherent(&pdev->dev, +- ASPEED_HASH_SRC_DMA_BUF_LEN, +- &hash_engine->ahash_src_dma_addr, +- GFP_KERNEL); +- if (!hash_engine->ahash_src_addr) { +- dev_err(&pdev->dev, "Failed to allocate dma buffer\n"); +- rc = -ENOMEM; +- goto err_engine_crypto_start; +- } +- +- /* Allocate DMA buffer for crypto engine context used */ +- crypto_engine->cipher_ctx = +- dmam_alloc_coherent(&pdev->dev, +- PAGE_SIZE, +- &crypto_engine->cipher_ctx_dma, +- GFP_KERNEL); +- if (!crypto_engine->cipher_ctx) { +- dev_err(&pdev->dev, "Failed to allocate cipher ctx dma\n"); +- rc = -ENOMEM; +- goto err_engine_crypto_start; +- } +- +- /* Allocate DMA buffer for crypto engine input used */ +- crypto_engine->cipher_addr = +- dmam_alloc_coherent(&pdev->dev, +- ASPEED_CRYPTO_SRC_DMA_BUF_LEN, +- &crypto_engine->cipher_dma_addr, +- GFP_KERNEL); +- if (!crypto_engine->cipher_addr) { +- dev_err(&pdev->dev, "Failed to allocate cipher addr dma\n"); +- rc = -ENOMEM; +- goto err_engine_crypto_start; +- } +- +- /* Allocate DMA buffer for crypto engine output used */ +- if (hace_dev->version == AST2600_VERSION) { +- crypto_engine->dst_sg_addr = +- dmam_alloc_coherent(&pdev->dev, +- ASPEED_CRYPTO_DST_DMA_BUF_LEN, +- &crypto_engine->dst_sg_dma_addr, +- GFP_KERNEL); +- if (!crypto_engine->dst_sg_addr) { +- dev_err(&pdev->dev, "Failed to allocate dst_sg dma\n"); +- rc = -ENOMEM; +- goto err_engine_crypto_start; ++ /* Get rst and de-assert reset */ ++ hace_dev->rst = devm_reset_control_get_shared(dev, NULL); ++ if (IS_ERR(hace_dev->rst)) { ++ dev_err(&pdev->dev, "Failed to get hace reset\n"); ++ return PTR_ERR(hace_dev->rst); ++ } ++ ++ rc = reset_control_deassert(hace_dev->rst); ++ if (rc) { ++ dev_err(&pdev->dev, "Deassert hace reset failed\n"); ++ return rc; ++ } ++ ++ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); ++ if (rc) { ++ dev_warn(&pdev->dev, "No suitable 
DMA available\n"); ++ return rc; ++ } ++ ++ /* Init mutex lock for supporting hace concurrent*/ ++ sema_init(&hace_dev->lock, 1); ++ ++#ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_HASH ++ rc = aspeed_hace_hash_init(hace_dev); ++ if (rc) { ++ dev_err(&pdev->dev, "Hash init failed\n"); ++ return rc; ++ } ++#endif ++#ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO ++ rc = aspeed_hace_crypto_init(hace_dev); ++ if (rc) { ++ dev_err(&pdev->dev, "Crypto init failed\n"); ++ return rc; ++ } ++ ++ if (of_find_property(dev->of_node, "dummy-key1", NULL)) { ++ dummy_key1 = kzalloc(DUMMY_KEY_SIZE, GFP_KERNEL); ++ if (dummy_key1) { ++ err = of_property_read_u8_array(dev->of_node, "dummy-key1", dummy_key1, DUMMY_KEY_SIZE); ++ if (err) ++ dev_err(dev, "error of reading dummy_key 1\n"); ++ } else { ++ dev_err(dev, "error dummy_key1 allocation\n"); + } + } + ++ if (of_find_property(dev->of_node, "dummy-key2", NULL)) { ++ dummy_key2 = kzalloc(DUMMY_KEY_SIZE, GFP_KERNEL); ++ if (dummy_key2) { ++ err = of_property_read_u8_array(dev->of_node, "dummy-key2", dummy_key2, DUMMY_KEY_SIZE); ++ if (err) ++ dev_err(dev, "error of reading dummy_key 2\n"); ++ } else { ++ dev_err(dev, "error dummy_key2 allocation\n"); ++ } ++ } ++ ++ sec_node = of_find_compatible_node(NULL, NULL, "aspeed,ast2600-sbc"); ++ if (!sec_node) { ++ dev_err(dev, "cannot find sbc node\n"); ++ } else { ++ hace_dev->sec_regs = of_iomap(sec_node, 0); ++ if (!hace_dev->sec_regs) ++ dev_err(dev, "failed to map SBC registers\n"); ++ } ++#endif + aspeed_hace_register(hace_dev); + + dev_info(&pdev->dev, "Aspeed Crypto Accelerator successfully registered\n"); + + return 0; +- +-err_engine_crypto_start: +- crypto_engine_exit(hace_dev->crypt_engine_crypto); +-err_engine_hash_start: +- crypto_engine_exit(hace_dev->crypt_engine_hash); +-clk_exit: +- clk_disable_unprepare(hace_dev->clk); +- +- return rc; + } + + static void aspeed_hace_remove(struct platform_device *pdev) + { + struct aspeed_hace_dev *hace_dev = platform_get_drvdata(pdev); +- struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine; +- struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine; + + aspeed_hace_unregister(hace_dev); + +- crypto_engine_exit(hace_dev->crypt_engine_hash); +- crypto_engine_exit(hace_dev->crypt_engine_crypto); ++#ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_HASH ++ aspeed_hace_hash_remove(hace_dev); ++#endif + +- tasklet_kill(&hash_engine->done_task); +- tasklet_kill(&crypto_engine->done_task); ++#ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO ++ aspeed_hace_crypto_remove(hace_dev); ++#endif + + clk_disable_unprepare(hace_dev->clk); + } +@@ -266,7 +277,7 @@ + + static struct platform_driver aspeed_hace_driver = { + .probe = aspeed_hace_probe, +- .remove_new = aspeed_hace_remove, ++ .remove = aspeed_hace_remove, + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = aspeed_hace_of_matches, +diff --git a/drivers/crypto/aspeed/aspeed-hace.h b/drivers/crypto/aspeed/aspeed-hace.h +--- a/drivers/crypto/aspeed/aspeed-hace.h 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/crypto/aspeed/aspeed-hace.h 2025-12-23 10:16:21.140032401 +0000 +@@ -10,6 +10,8 @@ + #include + #include + #include ++#include ++#include + + /***************************** + * * +@@ -23,7 +25,7 @@ + #define ASPEED_HACE_CMD 0x10 /* Crypto Engine Command Register */ + + /* G5 */ +-#define ASPEED_HACE_TAG 0x18 /* HACE Tag Register */ ++#define ASPEED_HACE_TAG 0x18 /* HACE Tag Write Buffer Base Address Register */ + /* G6 */ + #define ASPEED_HACE_GCM_ADD_LEN 0x14 /* Crypto AES-GCM Additional Data Length 
Register */ + #define ASPEED_HACE_GCM_TAG_BASE_ADDR 0x18 /* Crypto AES-GCM Tag Write Buff Base Address Reg */ +@@ -36,6 +38,15 @@ + #define ASPEED_HACE_HASH_DATA_LEN 0x2C /* Hash Data Length Register */ + #define ASPEED_HACE_HASH_CMD 0x30 /* Hash Engine Command Register */ + ++/* G7 */ ++#define ASPEED_HACE_SRC_H 0x80 /* Crypto Data Source Base High Address Register */ ++#define ASPEED_HACE_DEST_H 0x84 /* Crypto Data Destination Base High Address Register */ ++#define ASPEED_HACE_CONTEXT_H 0x88 /* Crypto Context Buffer Base High Address Register */ ++#define ASPEED_HACE_TAG_H 0x8C /* HACE Tag Write Buffer Base High Address Register */ ++#define ASPEED_HACE_HASH_SRC_H 0x90 /* Hash Data Source Base High Address Register */ ++#define ASPEED_HACE_HASH_DIGEST_BUFF_H 0x94 /* Hash Digest Write Buffer Base High Address Register */ ++#define ASPEED_HACE_HASH_KEY_BUFF_H 0x98 /* Hash HMAC Key Buffer Base High Address Register */ ++ + /* crypto cmd */ + #define HACE_CMD_SINGLE_DES 0 + #define HACE_CMD_TRIPLE_DES BIT(17) +@@ -109,8 +120,9 @@ + + #define CRYPTO_FLAGS_BUSY BIT(1) + +-#define SHA_OP_UPDATE 1 +-#define SHA_OP_FINAL 2 ++#define SHA_OP_INIT BIT(0) ++#define SHA_OP_UPDATE BIT(1) ++#define SHA_OP_FINAL BIT(2) + + #define SHA_FLAGS_SHA1 BIT(0) + #define SHA_FLAGS_SHA224 BIT(1) +@@ -132,6 +144,8 @@ + #define HACE_CMD_IV_REQUIRE (HACE_CMD_CBC | HACE_CMD_CFB | \ + HACE_CMD_OFB | HACE_CMD_CTR) + ++#define DUMMY_KEY_SIZE 32 ++ + struct aspeed_hace_dev; + struct scatterlist; + +@@ -147,10 +161,21 @@ + unsigned long flags; + struct ahash_request *req; + ++ /* Protects hash engine operation enqueue in order */ ++ struct mutex queue_lock; ++ + /* input buffer */ + void *ahash_src_addr; + dma_addr_t ahash_src_dma_addr; + ++ /* remain data buffer */ ++ u8 *buffer_addr; ++ dma_addr_t buffer_dma_addr; ++ ++ /* output buffer */ ++ void *digest_addr; ++ dma_addr_t digest_dma_addr; ++ + dma_addr_t src_dma; + dma_addr_t digest_dma; + +@@ -192,12 +217,10 @@ + + /* remain data buffer */ + u8 buffer[SHA512_BLOCK_SIZE * 2]; +- dma_addr_t buffer_dma_addr; + size_t bufcnt; /* buffer counter */ + + /* output buffer */ +- u8 digest[SHA512_DIGEST_SIZE] __aligned(64); +- dma_addr_t digest_dma_addr; ++ u8 digest[SHA512_DIGEST_SIZE]; + u64 digcnt[2]; + }; + +@@ -220,12 +243,14 @@ + + /* callback func */ + aspeed_hace_fn_t resume; ++ int load_vault_key; + }; + + struct aspeed_cipher_ctx { + struct aspeed_hace_dev *hace_dev; + int key_len; + u8 key[AES_MAX_KEYLENGTH]; ++ int dummy_key; + + /* callback func */ + aspeed_hace_fn_t start; +@@ -243,11 +268,16 @@ + + struct aspeed_hace_dev { + void __iomem *regs; ++ void __iomem *sec_regs; + struct device *dev; + int irq; + struct clk *clk; ++ struct reset_control *rst; + unsigned long version; + ++ /* Protects hace register access */ ++ struct semaphore lock; ++ + struct crypto_engine *crypt_engine_hash; + struct crypto_engine *crypt_engine_crypto; + +@@ -268,7 +298,8 @@ + + enum aspeed_version { + AST2500_VERSION = 5, +- AST2600_VERSION ++ AST2600_VERSION, ++ AST2700_VERSION, + }; + + #define ast_hace_write(hace, val, offset) \ +@@ -280,5 +311,11 @@ + void aspeed_unregister_hace_hash_algs(struct aspeed_hace_dev *hace_dev); + void aspeed_register_hace_crypto_algs(struct aspeed_hace_dev *hace_dev); + void aspeed_unregister_hace_crypto_algs(struct aspeed_hace_dev *hace_dev); ++int aspeed_hace_hash_init(struct aspeed_hace_dev *hace_dev); ++void aspeed_hace_hash_remove(struct aspeed_hace_dev *hace_dev); ++int aspeed_hace_crypto_init(struct aspeed_hace_dev *hace_dev); ++void 
aspeed_hace_crypto_remove(struct aspeed_hace_dev *hace_dev); ++int find_dummy_key(const char *key, int keylen); ++int aspeed_hace_reset(struct aspeed_hace_dev *dev); + + #endif +diff --git a/drivers/crypto/aspeed/aspeed-rsss-hash.c b/drivers/crypto/aspeed/aspeed-rsss-hash.c +--- a/drivers/crypto/aspeed/aspeed-rsss-hash.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/crypto/aspeed/aspeed-rsss-hash.c 2025-12-23 10:16:21.140032401 +0000 +@@ -0,0 +1,901 @@ ++// SPDX-License-Identifier: GPL-2.0+ ++/* ++ * Copyright 2023 Aspeed Technology Inc. ++ */ ++ ++#include ++#include ++#include ++#include "aspeed-rsss.h" ++ ++//#define RSSS_SHA3_POLLING_MODE ++ ++static int aspeed_sha3_self_test(struct aspeed_rsss_dev *rsss_dev) ++{ ++ u32 pattern = 0xbeef; ++ u32 val; ++ ++ ast_rsss_write(rsss_dev, pattern, ASPEED_SHA3_SRC_LO); ++ val = ast_rsss_read(rsss_dev, ASPEED_SHA3_SRC_LO); ++ if (val != pattern) ++ return -EIO; ++ ++ ast_rsss_write(rsss_dev, 0x0, ASPEED_SHA3_SRC_LO); ++ val = ast_rsss_read(rsss_dev, ASPEED_SHA3_SRC_LO); ++ if (val) ++ return -EIO; ++ ++ return 0; ++} ++ ++static int aspeed_sha3_dma_prepare(struct aspeed_rsss_dev *rsss_dev) ++{ ++ struct aspeed_engine_sha3 *sha3_engine = &rsss_dev->sha3_engine; ++ struct ahash_request *req = sha3_engine->req; ++ struct aspeed_sha3_reqctx *rctx; ++ int length, remain; ++ ++ rctx = ahash_request_ctx(req); ++ remain = (rctx->total + rctx->bufcnt) % rctx->blksize; ++ length = rctx->total + rctx->bufcnt - remain; ++ ++ RSSS_DBG(rsss_dev, "%s:0x%x, %s:%zu, %s:0x%x, %s:0x%x, %s:0x%x\n", ++ "rctx total", rctx->total, "bufcnt", rctx->bufcnt, ++ "offset", rctx->offset, "length", length, ++ "remain", remain); ++ ++ if (rctx->bufcnt) ++ memcpy(sha3_engine->ahash_src_addr, rctx->buffer, ++ rctx->bufcnt); ++ ++ if (length < ASPEED_HASH_SRC_DMA_BUF_LEN) { ++ scatterwalk_map_and_copy(sha3_engine->ahash_src_addr + rctx->bufcnt, ++ rctx->src_sg, rctx->offset, ++ rctx->total - remain, 0); ++ rctx->offset += rctx->total - remain; ++ ++ } else { ++ dev_warn(rsss_dev->dev, "SHA3 input data length is too large\n"); ++ return -EINVAL; ++ } ++ ++ /* Copy remain data into buffer */ ++ scatterwalk_map_and_copy(rctx->buffer, rctx->src_sg, ++ rctx->offset, remain, 0); ++ rctx->bufcnt = remain; ++ ++ sha3_engine->src_length = length; ++ sha3_engine->src_dma = sha3_engine->ahash_src_dma_addr; ++ ++ return 0; ++} ++ ++/* ++ * Prepare DMA buffer as SG list buffer before ++ * hardware engine processing. ++ */ ++static int aspeed_sha3_dma_prepare_sg(struct aspeed_rsss_dev *rsss_dev) ++{ ++ struct aspeed_engine_sha3 *sha3_engine = &rsss_dev->sha3_engine; ++ struct ahash_request *req = sha3_engine->req; ++ struct aspeed_sha3_reqctx *rctx; ++ struct aspeed_sg_list *src_list; ++ struct scatterlist *s; ++ int length, remain, sg_len; ++ int i, rc = 0; ++ ++ rctx = ahash_request_ctx(req); ++ remain = (rctx->total + rctx->bufcnt) % rctx->blksize; ++ length = rctx->total + rctx->bufcnt - remain; ++ ++ RSSS_DBG(rsss_dev, "%s:0x%x, %s:%zu, %s:0x%x, %s:0x%x\n", ++ "rctx total", rctx->total, "bufcnt", rctx->bufcnt, ++ "length", length, "remain", remain); ++ ++ sg_len = dma_map_sg(rsss_dev->dev, rctx->src_sg, rctx->src_nents, ++ DMA_TO_DEVICE); ++ /* ++ * Need dma_sync_sg_for_device()? 
++ */ ++ if (!sg_len) { ++ dev_warn(rsss_dev->dev, "dma_map_sg() src error\n"); ++ rc = -ENOMEM; ++ goto end; ++ } ++ ++ src_list = (struct aspeed_sg_list *)sha3_engine->ahash_src_addr; ++ ++ if (rctx->bufcnt != 0) { ++ u64 phy_addr; ++ u32 len; ++ ++ phy_addr = sha3_engine->buffer_dma_addr; ++ len = rctx->bufcnt; ++ length -= len; ++ ++ /* Last sg list */ ++ if (length == 0) ++ len |= SG_LAST_LIST; ++ ++ src_list[0].phy_addr = cpu_to_le64(phy_addr); ++ src_list[0].len = cpu_to_le32(len); ++ ++ RSSS_DBG(rsss_dev, "Remain buffer first, addr:%llx, len:0x%x\n", ++ src_list[0].phy_addr, src_list[0].len); ++ ++ src_list++; ++ } ++ ++ if (length != 0) { ++ for_each_sg(rctx->src_sg, s, sg_len, i) { ++ u64 phy_addr = sg_dma_address(s); ++ u32 len = sg_dma_len(s); ++ u8 *va = sg_virt(s); ++ ++ RSSS_DBG(rsss_dev, "SG[%d] PA:%llx, VA:%llx, len:0x%x\n", ++ i, sg_dma_address(s), (u64)va, len); ++ ++ if (length > len) { ++ length -= len; ++ } else { ++ /* Last sg list */ ++ len = length; ++ len |= SG_LAST_LIST; ++ length = 0; ++ } ++ ++ src_list[i].phy_addr = cpu_to_le64(phy_addr); ++ src_list[i].len = cpu_to_le32(len); ++ ++ len = len & 0xffff; ++ } ++ } ++ ++ if (length != 0) { ++ rc = -EINVAL; ++ goto free_src_sg; ++ } ++ ++ rctx->offset = rctx->total - remain; ++ sha3_engine->src_length = rctx->total + rctx->bufcnt - remain; ++ sha3_engine->src_dma = sha3_engine->ahash_src_dma_addr; ++ ++ return 0; ++ ++free_src_sg: ++ RSSS_DBG(rsss_dev, "dma_unmap_sg()\n"); ++ dma_unmap_sg(rsss_dev->dev, rctx->src_sg, rctx->src_nents, ++ DMA_TO_DEVICE); ++end: ++ return rc; ++} ++ ++static int aspeed_sha3_complete(struct aspeed_rsss_dev *rsss_dev) ++{ ++ struct aspeed_engine_sha3 *sha3_engine = &rsss_dev->sha3_engine; ++ struct ahash_request *req = sha3_engine->req; ++ ++ RSSS_DBG(rsss_dev, "\n"); ++ ++ sha3_engine->flags &= ~CRYPTO_FLAGS_BUSY; ++ ++ crypto_finalize_hash_request(rsss_dev->crypt_engine_sha3, req, 0); ++ ++ return 0; ++} ++ ++/* ++ * Copy digest to the corresponding request result. ++ * This function will be called at final() stage. 
++ */ ++static int aspeed_sha3_transfer(struct aspeed_rsss_dev *rsss_dev) ++{ ++ struct aspeed_engine_sha3 *sha3_engine = &rsss_dev->sha3_engine; ++ struct ahash_request *req = sha3_engine->req; ++ struct aspeed_sha3_reqctx *rctx; ++ ++ RSSS_DBG(rsss_dev, "\n"); ++ ++ rctx = ahash_request_ctx(req); ++ ++ /* add usleep for DMA done */ ++ udelay(8); ++ memcpy(req->result, sha3_engine->digest_addr, rctx->digsize); ++ ++ return aspeed_sha3_complete(rsss_dev); ++} ++ ++#ifdef RSSS_SHA3_POLLING_MODE ++static int aspeed_sha3_wait_complete(struct aspeed_rsss_dev *rsss_dev) ++{ ++ struct aspeed_engine_sha3 *sha3_engine = &rsss_dev->sha3_engine; ++ u32 sts; ++ int ret; ++ ++ ret = readl_poll_timeout(rsss_dev->regs + ASPEED_SHA3_BUSY_STS, sts, ++ ((sts & SHA3_STS) == 0x0), ++ ASPEED_RSSS_POLLING_TIME, ++ ASPEED_RSSS_TIMEOUT * 10); ++ if (ret) { ++ dev_err(rsss_dev->dev, "SHA3 wrong engine status\n"); ++ return -EIO; ++ } ++ ++ ret = readl_poll_timeout(rsss_dev->regs + ASPEED_RSSS_INT_STS, sts, ++ ((sts & SHA3_INT_DONE) == SHA3_INT_DONE), ++ ASPEED_RSSS_POLLING_TIME, ++ ASPEED_RSSS_TIMEOUT); ++ if (ret) { ++ dev_err(rsss_dev->dev, "SHA3 wrong interrupt status\n"); ++ return -EIO; ++ } ++ ++ ast_rsss_write(rsss_dev, sts, ASPEED_RSSS_INT_STS); ++ ++ RSSS_DBG(rsss_dev, "irq sts:0x%x\n", sts); ++ ++ if (sts & SHA3_INT_DONE) { ++ if (sha3_engine->flags & CRYPTO_FLAGS_BUSY) ++ tasklet_schedule(&sha3_engine->done_task); ++ else ++ dev_err(rsss_dev->dev, "SHA3 no active requests.\n"); ++ } ++ ++ return 0; ++} ++#endif ++ ++/* ++ * Trigger hardware engines to do the math. ++ */ ++static int aspeed_sha3_trigger(struct aspeed_rsss_dev *rsss_dev, ++ aspeed_rsss_fn_t resume) ++{ ++ struct aspeed_engine_sha3 *sha3_engine = &rsss_dev->sha3_engine; ++ struct ahash_request *req = sha3_engine->req; ++ struct aspeed_sha3_reqctx *rctx; ++ ++ RSSS_DBG(rsss_dev, "src_dma:%pad, digest_dma:%pad, length:%zu\n", ++ &sha3_engine->src_dma, &sha3_engine->digest_dma_addr, ++ sha3_engine->src_length); ++ ++ rctx = ahash_request_ctx(req); ++ sha3_engine->resume = resume; ++ ++ memcpy(sha3_engine->digest_addr, rctx->digest, rctx->digsize); ++ memcpy(sha3_engine->buffer_addr, rctx->buffer, rctx->bufcnt); ++ ++ ast_rsss_write(rsss_dev, sha3_engine->src_dma, ++ ASPEED_SHA3_SRC_LO); ++ /* TODO - SRC_HI */ ++ ast_rsss_write(rsss_dev, sha3_engine->src_dma >> 32, ++ ASPEED_SHA3_SRC_HI); ++ ++ ast_rsss_write(rsss_dev, sha3_engine->digest_dma_addr, ++ ASPEED_SHA3_DST_LO); ++ /* TODO - DST_HI */ ++ ast_rsss_write(rsss_dev, sha3_engine->digest_dma_addr >> 32, ++ ASPEED_SHA3_DST_HI); ++ ++ if (!sha3_engine->sg_mode) ++ ast_rsss_write(rsss_dev, sha3_engine->src_length, ++ ASPEED_SHA3_SRC_LEN); ++ ++ ast_rsss_write(rsss_dev, rctx->cmd, ASPEED_SHA3_CMD); ++ ++ /* Memory barrier to ensure all data setup before engine starts */ ++ mb(); ++ ++ rctx->cmd |= SHA3_CMD_TRIG; ++ ++ RSSS_DBG(rsss_dev, "cmd:0x%x\n", rctx->cmd); ++ ++ ast_rsss_write(rsss_dev, rctx->cmd, ASPEED_SHA3_CMD); ++ ++#ifdef RSSS_SHA3_POLLING_MODE ++ return aspeed_sha3_wait_complete(rsss_dev); ++#else ++ return -EINPROGRESS; ++#endif ++} ++ ++static int aspeed_sha3_req_final(struct aspeed_rsss_dev *rsss_dev) ++{ ++ struct aspeed_engine_sha3 *sha3_engine = &rsss_dev->sha3_engine; ++ struct ahash_request *req = sha3_engine->req; ++ struct aspeed_sha3_reqctx *rctx = ahash_request_ctx(req); ++ int remain_pad; ++ u8 *src; ++ ++ RSSS_DBG(rsss_dev, "\n"); ++ ++ /* A0 padding issue */ ++ remain_pad = rctx->blksize - rctx->bufcnt; ++ if (remain_pad < 16) { ++ /* SW padding */ ++ 
RSSS_DBG(rsss_dev, "Use SW padding, pad size:0x%x\n", ++ remain_pad); ++ src = (u8 *)rctx->buffer; ++ src[rctx->bufcnt] = 0x06; ++ memset(src + rctx->bufcnt + 1, 0, remain_pad - 1); ++ src[rctx->bufcnt + remain_pad - 1] |= 0x80; ++ ++ rctx->bufcnt += remain_pad; ++ ++ } else { ++ rctx->cmd |= SHA3_CMD_HW_PAD; ++ } ++ ++ if (sha3_engine->sg_mode) { ++ struct aspeed_sg_list *src_list = ++ (struct aspeed_sg_list *)sha3_engine->ahash_src_addr; ++ u64 phy_addr; ++ u32 len; ++ ++ phy_addr = sha3_engine->buffer_dma_addr; ++ len = rctx->bufcnt; ++ len |= SG_LAST_LIST; ++ ++ src_list[0].phy_addr = cpu_to_le64(phy_addr); ++ src_list[0].len = cpu_to_le32(len); ++ ++ RSSS_DBG(rsss_dev, "Final SG, addr:%llx, len:0x%x\n", ++ src_list[0].phy_addr, src_list[0].len); ++ ++ rctx->cmd |= SHA3_CMD_SG_MODE; ++ sha3_engine->src_dma = sha3_engine->ahash_src_dma_addr; ++ ++ } else { ++ sha3_engine->src_dma = sha3_engine->buffer_dma_addr; ++ sha3_engine->src_length = rctx->bufcnt; ++ } ++ ++ rctx->cmd |= SHA3_CMD_ACC_FINAL; ++ ++ return aspeed_sha3_trigger(rsss_dev, aspeed_sha3_transfer); ++} ++ ++static int aspeed_sha3_update_resume(struct aspeed_rsss_dev *rsss_dev) ++{ ++ struct aspeed_engine_sha3 *sha3_engine = &rsss_dev->sha3_engine; ++ struct ahash_request *req = sha3_engine->req; ++ struct aspeed_sha3_reqctx *rctx; ++ ++ RSSS_DBG(rsss_dev, "\n"); ++ ++ rctx = ahash_request_ctx(req); ++ ++ memcpy(rctx->digest, sha3_engine->digest_addr, rctx->digsize); ++ rctx->cmd &= ~SHA3_CMD_TRIG; ++ ++ if (rctx->flags & SHA3_FLAGS_FINUP) ++ return aspeed_sha3_req_final(rsss_dev); ++ ++ return aspeed_sha3_complete(rsss_dev); ++} ++ ++static int aspeed_sha3_update_resume_sg(struct aspeed_rsss_dev *rsss_dev) ++{ ++ struct aspeed_engine_sha3 *sha3_engine = &rsss_dev->sha3_engine; ++ struct ahash_request *req = sha3_engine->req; ++ struct aspeed_sha3_reqctx *rctx; ++ int remain; ++ ++ RSSS_DBG(rsss_dev, "\n"); ++ ++ rctx = ahash_request_ctx(req); ++ ++ memcpy(rctx->digest, sha3_engine->digest_addr, rctx->digsize); ++ remain = rctx->total - rctx->offset; ++ ++ RSSS_DBG(rsss_dev, "Copy remain data from 0x%x, size:0x%x\n", ++ rctx->offset, remain); ++ ++ dma_unmap_sg(rsss_dev->dev, rctx->src_sg, rctx->src_nents, ++ DMA_TO_DEVICE); ++ ++ scatterwalk_map_and_copy(rctx->buffer, rctx->src_sg, rctx->offset, ++ remain, 0); ++ ++ rctx->bufcnt = remain; ++ rctx->cmd &= ~(SHA3_CMD_TRIG | SHA3_CMD_SG_MODE); ++ ++ if (rctx->flags & SHA3_FLAGS_FINUP) ++ return aspeed_sha3_req_final(rsss_dev); ++ ++ return aspeed_sha3_complete(rsss_dev); ++} ++ ++static int aspeed_sha3_req_update(struct aspeed_rsss_dev *rsss_dev) ++{ ++ struct aspeed_engine_sha3 *sha3_engine = &rsss_dev->sha3_engine; ++ struct ahash_request *req = sha3_engine->req; ++ struct aspeed_sha3_reqctx *rctx; ++ aspeed_rsss_fn_t resume; ++ int ret; ++ ++ RSSS_DBG(rsss_dev, "\n"); ++ ++ rctx = ahash_request_ctx(req); ++ ++ if (sha3_engine->sg_mode) { ++ rctx->cmd |= SHA3_CMD_SG_MODE; ++ resume = aspeed_sha3_update_resume_sg; ++ ++ } else { ++ resume = aspeed_sha3_update_resume; ++ } ++ ++ ret = sha3_engine->dma_prepare(rsss_dev); ++ if (ret) ++ return ret; ++ ++ return aspeed_sha3_trigger(rsss_dev, resume); ++} ++ ++static int aspeed_sha3_do_request(struct crypto_engine *engine, void *areq) ++{ ++ struct ahash_request *req = ahash_request_cast(areq); ++ struct aspeed_sha3_reqctx *rctx = ahash_request_ctx(req); ++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); ++ struct aspeed_sha3_ctx *tctx = crypto_ahash_ctx(tfm); ++ struct aspeed_rsss_dev *rsss_dev = tctx->rsss_dev; ++ struct 
aspeed_engine_sha3 *sha3_engine; ++ int ret = 0; ++ ++ RSSS_DBG(rsss_dev, "\n"); ++ ++ sha3_engine = &rsss_dev->sha3_engine; ++ sha3_engine->flags |= CRYPTO_FLAGS_BUSY; ++ sha3_engine->req = req; ++ ++ if (sha3_engine->sg_mode) ++ sha3_engine->dma_prepare = aspeed_sha3_dma_prepare_sg; ++ else ++ sha3_engine->dma_prepare = aspeed_sha3_dma_prepare; ++ ++ if (rctx->op == SHA_OP_UPDATE) ++ ret = aspeed_sha3_req_update(rsss_dev); ++ else if (rctx->op == SHA_OP_FINAL) ++ ret = aspeed_sha3_req_final(rsss_dev); ++ ++ if (ret != -EINPROGRESS) ++ return ret; ++ ++ return 0; ++} ++ ++static int aspeed_sha3_handle_queue(struct aspeed_rsss_dev *rsss_dev, ++ struct ahash_request *req) ++{ ++ struct aspeed_sha3_reqctx *rctx = ahash_request_ctx(req); ++ struct aspeed_engine_sha3 *sha3_engine = &rsss_dev->sha3_engine; ++ int ret = 0; ++ ++ if (rctx->op & SHA_OP_INIT) { ++ mutex_lock(&sha3_engine->queue_lock); ++ return 0; ++ } ++ ++ ret = crypto_transfer_hash_request_to_engine(rsss_dev->crypt_engine_sha3, ++ req); ++ ++ /* The last request is enqueued, release the lock */ ++ if (rctx->op & SHA_OP_FINAL || rctx->flags & SHA3_FLAGS_FINUP) ++ mutex_unlock(&sha3_engine->queue_lock); ++ ++ return ret; ++} ++ ++static int aspeed_sha3_update(struct ahash_request *req) ++{ ++ struct aspeed_sha3_reqctx *rctx = ahash_request_ctx(req); ++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); ++ struct aspeed_sha3_ctx *tctx = crypto_ahash_ctx(tfm); ++ struct aspeed_rsss_dev *rsss_dev = tctx->rsss_dev; ++ struct aspeed_engine_sha3 *sha3_engine; ++ ++ RSSS_DBG(rsss_dev, "req->nbytes: %d\n", req->nbytes); ++ ++ sha3_engine = &rsss_dev->sha3_engine; ++ ++ rctx->total = req->nbytes; ++ rctx->src_sg = req->src; ++ rctx->offset = 0; ++ rctx->src_nents = sg_nents(req->src); ++ rctx->op = SHA_OP_UPDATE; ++ ++ RSSS_DBG(rsss_dev, "total:0x%x, src_nents:0x%x\n", rctx->total, rctx->src_nents); ++ ++ rctx->digcnt[0] += rctx->total; ++ if (rctx->digcnt[0] < rctx->total) ++ rctx->digcnt[1]++; ++ ++ if (rctx->bufcnt + rctx->total < rctx->blksize) { ++ scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, ++ rctx->src_sg, rctx->offset, ++ rctx->total, 0); ++ rctx->bufcnt += rctx->total; ++ ++ return 0; ++ } ++ ++ return aspeed_sha3_handle_queue(rsss_dev, req); ++} ++ ++static int aspeed_sha3_final(struct ahash_request *req) ++{ ++ struct aspeed_sha3_reqctx *rctx = ahash_request_ctx(req); ++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); ++ struct aspeed_sha3_ctx *tctx = crypto_ahash_ctx(tfm); ++ struct aspeed_rsss_dev *rsss_dev = tctx->rsss_dev; ++ ++ RSSS_DBG(rsss_dev, "req->nbytes:%d, rctx->total:%d\n", ++ req->nbytes, rctx->total); ++ rctx->op = SHA_OP_FINAL; ++ ++ return aspeed_sha3_handle_queue(rsss_dev, req); ++} ++ ++static int aspeed_sha3_finup(struct ahash_request *req) ++{ ++ struct aspeed_sha3_reqctx *rctx = ahash_request_ctx(req); ++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); ++ struct aspeed_sha3_ctx *tctx = crypto_ahash_ctx(tfm); ++ struct aspeed_rsss_dev *rsss_dev = tctx->rsss_dev; ++ int rc1, rc2; ++ ++ RSSS_DBG(rsss_dev, "req->nbytes: %d\n", req->nbytes); ++ ++ rctx->flags |= SHA3_FLAGS_FINUP; ++ ++ rc1 = aspeed_sha3_update(req); ++ if (rc1 == -EINPROGRESS || rc1 == -EBUSY) ++ return rc1; ++ ++ /* ++ * final() has to be always called to cleanup resources ++ * even if update() failed, except EINPROGRESS ++ */ ++ rc2 = aspeed_sha3_final(req); ++ ++ return rc1 ? 
: rc2; ++} ++ ++static int aspeed_sha3_init(struct ahash_request *req) ++{ ++ struct aspeed_sha3_reqctx *rctx = ahash_request_ctx(req); ++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); ++ struct aspeed_sha3_ctx *tctx = crypto_ahash_ctx(tfm); ++ struct aspeed_rsss_dev *rsss_dev = tctx->rsss_dev; ++ ++ RSSS_DBG(rsss_dev, "%s: digest size:%d\n", ++ crypto_tfm_alg_name(&tfm->base), ++ crypto_ahash_digestsize(tfm)); ++ ++ rctx->cmd = SHA3_CMD_ACC; ++ rctx->op = SHA_OP_INIT; ++ rctx->flags = 0; ++ ++ switch (crypto_ahash_digestsize(tfm)) { ++ case SHA3_224_DIGEST_SIZE: ++ rctx->cmd |= SHA3_CMD_MODE_224; ++ rctx->flags |= SHA3_FLAGS_SHA224; ++ rctx->digsize = SHA3_224_DIGEST_SIZE; ++ rctx->blksize = SHA3_224_BLOCK_SIZE; ++ break; ++ case SHA3_256_DIGEST_SIZE: ++ rctx->cmd |= SHA3_CMD_MODE_256; ++ rctx->flags |= SHA3_FLAGS_SHA256; ++ rctx->digsize = SHA3_256_DIGEST_SIZE; ++ rctx->blksize = SHA3_256_BLOCK_SIZE; ++ break; ++ case SHA3_384_DIGEST_SIZE: ++ rctx->cmd |= SHA3_CMD_MODE_384; ++ rctx->flags |= SHA3_FLAGS_SHA384; ++ rctx->digsize = SHA3_384_DIGEST_SIZE; ++ rctx->blksize = SHA3_384_BLOCK_SIZE; ++ break; ++ case SHA3_512_DIGEST_SIZE: ++ rctx->cmd |= SHA3_CMD_MODE_512; ++ rctx->flags |= SHA3_FLAGS_SHA512; ++ rctx->digsize = SHA3_512_DIGEST_SIZE; ++ rctx->blksize = SHA3_512_BLOCK_SIZE; ++ break; ++ default: ++ dev_warn(tctx->rsss_dev->dev, "digest size %d not support\n", ++ crypto_ahash_digestsize(tfm)); ++ return -EINVAL; ++ } ++ ++ rctx->bufcnt = 0; ++ rctx->total = 0; ++ rctx->digcnt[0] = 0; ++ rctx->digcnt[1] = 0; ++ ++ memset(rctx->digest, 0x0, SHA3_512_DIGEST_SIZE); ++ ++ return aspeed_sha3_handle_queue(rsss_dev, req); ++} ++ ++static int aspeed_sha3_digest(struct ahash_request *req) ++{ ++ return aspeed_sha3_init(req) ? : aspeed_sha3_finup(req); ++} ++ ++static int aspeed_sha3_export(struct ahash_request *req, void *out) ++{ ++ struct aspeed_sha3_reqctx *rctx = ahash_request_ctx(req); ++ ++ memcpy(out, rctx, sizeof(*rctx)); ++ ++ return 0; ++} ++ ++static int aspeed_sha3_import(struct ahash_request *req, const void *in) ++{ ++ struct aspeed_sha3_reqctx *rctx = ahash_request_ctx(req); ++ ++ memcpy(rctx, in, sizeof(*rctx)); ++ ++ return 0; ++} ++ ++static int aspeed_sha3_cra_init(struct crypto_tfm *tfm) ++{ ++ struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg); ++ struct aspeed_sha3_ctx *tctx = crypto_tfm_ctx(tfm); ++ struct aspeed_rsss_alg *ast_alg; ++ ++ ast_alg = container_of(alg, struct aspeed_rsss_alg, alg.ahash.base); ++ tctx->rsss_dev = ast_alg->rsss_dev; ++ ++ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), ++ sizeof(struct aspeed_sha3_reqctx)); ++ ++ return 0; ++} ++ ++static void aspeed_sha3_cra_exit(struct crypto_tfm *tfm) ++{ ++ struct aspeed_sha3_ctx *tctx = crypto_tfm_ctx(tfm); ++ struct aspeed_rsss_dev *rsss_dev = tctx->rsss_dev; ++ ++ RSSS_DBG(rsss_dev, "%s\n", crypto_tfm_alg_name(tfm)); ++} ++ ++struct aspeed_rsss_alg aspeed_rsss_algs_sha3_224 = { ++ .type = ASPEED_ALGO_TYPE_AHASH, ++ .alg.ahash.base = { ++ .init = aspeed_sha3_init, ++ .update = aspeed_sha3_update, ++ .final = aspeed_sha3_final, ++ .finup = aspeed_sha3_finup, ++ .digest = aspeed_sha3_digest, ++ .export = aspeed_sha3_export, ++ .import = aspeed_sha3_import, ++ .halg = { ++ .digestsize = SHA3_224_DIGEST_SIZE, ++ .statesize = sizeof(struct aspeed_sha3_reqctx), ++ .base = { ++ .cra_name = "sha3-224", ++ .cra_driver_name = "aspeed-sha3-224", ++ .cra_priority = 300, ++ .cra_flags = CRYPTO_ALG_TYPE_AHASH | ++ CRYPTO_ALG_ASYNC | ++ CRYPTO_ALG_KERN_DRIVER_ONLY, ++ .cra_blocksize = 
SHA3_224_BLOCK_SIZE, ++ .cra_ctxsize = sizeof(struct aspeed_sha3_ctx), ++ .cra_module = THIS_MODULE, ++ .cra_init = aspeed_sha3_cra_init, ++ .cra_exit = aspeed_sha3_cra_exit, ++ } ++ } ++ }, ++ .alg.ahash.op = { ++ .do_one_request = aspeed_sha3_do_request, ++ }, ++}; ++ ++struct aspeed_rsss_alg aspeed_rsss_algs_sha3_256 = { ++ .type = ASPEED_ALGO_TYPE_AHASH, ++ .alg.ahash.base = { ++ .init = aspeed_sha3_init, ++ .update = aspeed_sha3_update, ++ .final = aspeed_sha3_final, ++ .finup = aspeed_sha3_finup, ++ .digest = aspeed_sha3_digest, ++ .export = aspeed_sha3_export, ++ .import = aspeed_sha3_import, ++ .halg = { ++ .digestsize = SHA3_256_DIGEST_SIZE, ++ .statesize = sizeof(struct aspeed_sha3_reqctx), ++ .base = { ++ .cra_name = "sha3-256", ++ .cra_driver_name = "aspeed-sha3-256", ++ .cra_priority = 300, ++ .cra_flags = CRYPTO_ALG_TYPE_AHASH | ++ CRYPTO_ALG_ASYNC | ++ CRYPTO_ALG_KERN_DRIVER_ONLY, ++ .cra_blocksize = SHA3_256_BLOCK_SIZE, ++ .cra_ctxsize = sizeof(struct aspeed_sha3_ctx), ++ .cra_module = THIS_MODULE, ++ .cra_init = aspeed_sha3_cra_init, ++ .cra_exit = aspeed_sha3_cra_exit, ++ } ++ } ++ }, ++ .alg.ahash.op = { ++ .do_one_request = aspeed_sha3_do_request, ++ }, ++}; ++ ++struct aspeed_rsss_alg aspeed_rsss_algs_sha3_384 = { ++ .type = ASPEED_ALGO_TYPE_AHASH, ++ .alg.ahash.base = { ++ .init = aspeed_sha3_init, ++ .update = aspeed_sha3_update, ++ .final = aspeed_sha3_final, ++ .finup = aspeed_sha3_finup, ++ .digest = aspeed_sha3_digest, ++ .export = aspeed_sha3_export, ++ .import = aspeed_sha3_import, ++ .halg = { ++ .digestsize = SHA3_384_DIGEST_SIZE, ++ .statesize = sizeof(struct aspeed_sha3_reqctx), ++ .base = { ++ .cra_name = "sha3-384", ++ .cra_driver_name = "aspeed-sha3-384", ++ .cra_priority = 300, ++ .cra_flags = CRYPTO_ALG_TYPE_AHASH | ++ CRYPTO_ALG_ASYNC | ++ CRYPTO_ALG_KERN_DRIVER_ONLY, ++ .cra_blocksize = SHA3_384_BLOCK_SIZE, ++ .cra_ctxsize = sizeof(struct aspeed_sha3_ctx), ++ .cra_module = THIS_MODULE, ++ .cra_init = aspeed_sha3_cra_init, ++ .cra_exit = aspeed_sha3_cra_exit, ++ } ++ } ++ }, ++ .alg.ahash.op = { ++ .do_one_request = aspeed_sha3_do_request, ++ }, ++}; ++ ++struct aspeed_rsss_alg aspeed_rsss_algs_sha3_512 = { ++ .type = ASPEED_ALGO_TYPE_AHASH, ++ .alg.ahash.base = { ++ .init = aspeed_sha3_init, ++ .update = aspeed_sha3_update, ++ .final = aspeed_sha3_final, ++ .finup = aspeed_sha3_finup, ++ .digest = aspeed_sha3_digest, ++ .export = aspeed_sha3_export, ++ .import = aspeed_sha3_import, ++ .halg = { ++ .digestsize = SHA3_512_DIGEST_SIZE, ++ .statesize = sizeof(struct aspeed_sha3_reqctx), ++ .base = { ++ .cra_name = "sha3-512", ++ .cra_driver_name = "aspeed-sha3-512", ++ .cra_priority = 300, ++ .cra_flags = CRYPTO_ALG_TYPE_AHASH | ++ CRYPTO_ALG_ASYNC | ++ CRYPTO_ALG_KERN_DRIVER_ONLY, ++ .cra_blocksize = SHA3_512_BLOCK_SIZE, ++ .cra_ctxsize = sizeof(struct aspeed_sha3_ctx), ++ .cra_module = THIS_MODULE, ++ .cra_init = aspeed_sha3_cra_init, ++ .cra_exit = aspeed_sha3_cra_exit, ++ } ++ } ++ }, ++ .alg.ahash.op = { ++ .do_one_request = aspeed_sha3_do_request, ++ }, ++}; ++ ++static void aspeed_rsss_sha3_done_task(unsigned long data) ++{ ++ struct aspeed_rsss_dev *rsss_dev = (struct aspeed_rsss_dev *)data; ++ struct aspeed_engine_sha3 *sha3_engine = &rsss_dev->sha3_engine; ++ ++ (void)sha3_engine->resume(rsss_dev); ++} ++ ++void aspeed_rsss_sha3_exit(struct aspeed_rsss_dev *rsss_dev) ++{ ++ struct aspeed_engine_sha3 *sha3_engine = &rsss_dev->sha3_engine; ++ ++ crypto_engine_exit(rsss_dev->crypt_engine_sha3); ++ tasklet_kill(&sha3_engine->done_task); ++} ++ 
++int aspeed_rsss_sha3_init(struct aspeed_rsss_dev *rsss_dev) ++{ ++ struct aspeed_engine_sha3 *sha3_engine; ++ u32 val; ++ int rc; ++ ++ rc = reset_control_deassert(rsss_dev->reset_sha3); ++ if (rc) { ++ dev_err(rsss_dev->dev, "Deassert SHA3 reset failed\n"); ++ goto end; ++ } ++ ++ sha3_engine = &rsss_dev->sha3_engine; ++ ++ /* Initialize crypto hardware engine structure for SHA3 */ ++ rsss_dev->crypt_engine_sha3 = crypto_engine_alloc_init(rsss_dev->dev, true); ++ if (!rsss_dev->crypt_engine_sha3) { ++ rc = -ENOMEM; ++ goto end; ++ } ++ ++ rc = crypto_engine_start(rsss_dev->crypt_engine_sha3); ++ if (rc) ++ goto err_engine_sha3_start; ++ ++ tasklet_init(&sha3_engine->done_task, aspeed_rsss_sha3_done_task, ++ (unsigned long)rsss_dev); ++ ++ /* Allocate DMA buffer for hash engine input used */ ++ sha3_engine->ahash_src_addr = ++ dmam_alloc_coherent(rsss_dev->dev, ++ ASPEED_HASH_SRC_DMA_BUF_LEN, ++ &sha3_engine->ahash_src_dma_addr, ++ GFP_KERNEL); ++ if (!sha3_engine->ahash_src_addr) { ++ dev_err(rsss_dev->dev, "Failed to allocate DMA src buffer\n"); ++ rc = -ENOMEM; ++ goto err_engine_sha3_start; ++ } ++ ++ sha3_engine->buffer_addr = dmam_alloc_coherent(rsss_dev->dev, SHA3_224_BLOCK_SIZE, ++ &sha3_engine->buffer_dma_addr, ++ GFP_KERNEL); ++ if (!sha3_engine->buffer_addr) { ++ dev_err(rsss_dev->dev, "Failed to allocate DMA buffer\n"); ++ rc = -ENOMEM; ++ goto err_engine_sha3_start; ++ } ++ ++ sha3_engine->digest_addr = dmam_alloc_coherent(rsss_dev->dev, SHA3_512_DIGEST_SIZE, ++ &sha3_engine->digest_dma_addr, ++ GFP_KERNEL); ++ if (!sha3_engine->digest_addr) { ++ dev_err(rsss_dev->dev, "Failed to allocate DMA digest buffer\n"); ++ rc = -ENOMEM; ++ goto err_engine_sha3_start; ++ } ++ ++ /* ++ * Set 1 to use scatter-gather mode. ++ * Set 0 to use direct mode. ++ */ ++ sha3_engine->sg_mode = 0; ++ ++ /* Self-test */ ++ rc = aspeed_sha3_self_test(rsss_dev); ++ if (rc) ++ goto err_engine_sha3_start; ++ ++ /* Sha3 engine hardware init done, prepare queue lock */ ++ mutex_init(&sha3_engine->queue_lock); ++ ++ /* Enable SHA3 interrupt */ ++ val = ast_rsss_read(rsss_dev, ASPEED_RSSS_INT_EN); ++ ast_rsss_write(rsss_dev, val | SHA3_INT_EN, ASPEED_RSSS_INT_EN); ++ dev_info(rsss_dev->dev, "Aspeed RSSS SHA3 interrupt mode.\n"); ++ ++ dev_info(rsss_dev->dev, "Aspeed RSSS SHA3 initialized (%s mode)\n", ++ sha3_engine->sg_mode ? "SG" : "Direct"); ++ ++ return 0; ++ ++err_engine_sha3_start: ++ crypto_engine_exit(rsss_dev->crypt_engine_sha3); ++end: ++ return rc; ++} +diff --git a/drivers/crypto/aspeed/aspeed-rsss-rsa.c b/drivers/crypto/aspeed/aspeed-rsss-rsa.c +--- a/drivers/crypto/aspeed/aspeed-rsss-rsa.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/crypto/aspeed/aspeed-rsss-rsa.c 2025-12-23 10:16:21.140032401 +0000 +@@ -0,0 +1,608 @@ ++// SPDX-License-Identifier: GPL-2.0+ ++/* ++ * Copyright 2023 Aspeed Technology Inc. 
++ */ ++ ++#include ++#include "aspeed-rsss.h" ++ ++static u8 data_rev[SRAM_BLOCK_SIZE]; ++static u8 data[SRAM_BLOCK_SIZE]; ++static int dbg; ++ ++static void hexdump(char *name, unsigned char *buf, unsigned int len) ++{ ++ if (!dbg) ++ return; ++ ++#ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG ++ pr_info("%s:\n", name); ++ print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET, ++ 16, 1, buf, len, false); ++#endif ++} ++ ++static int aspeed_rsa_self_test(struct aspeed_rsss_dev *rsss_dev) ++{ ++ struct aspeed_engine_rsa *rsa_engine; ++ const u32 pattern = 0xffffffff; ++ u32 val; ++ ++ rsa_engine = &rsss_dev->rsa_engine; ++ ++ /* Set SRAM access control - CPU */ ++ val = ast_rsss_read(rsss_dev, ASPEED_RSSS_CTRL); ++ ast_rsss_write(rsss_dev, val | SRAM_AHB_MODE_CPU, ASPEED_RSSS_CTRL); ++ ++ writel(pattern, rsa_engine->sram_exp); ++ val = readl(rsa_engine->sram_exp); ++ if (val != pattern) ++ return -EIO; ++ ++ writel(0x0, rsa_engine->sram_exp); ++ ++ return 0; ++} ++ ++static inline struct akcipher_request * ++ akcipher_request_cast(struct crypto_async_request *req) ++{ ++ return container_of(req, struct akcipher_request, base); ++} ++ ++static int aspeed_rsa_do_fallback(struct akcipher_request *req) ++{ ++ struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req); ++ struct aspeed_rsa_ctx *ctx = akcipher_tfm_ctx(cipher); ++ int err; ++ ++ akcipher_request_set_tfm(req, ctx->fallback_tfm); ++ ++ if (ctx->enc) ++ err = crypto_akcipher_encrypt(req); ++ else ++ err = crypto_akcipher_decrypt(req); ++ ++ akcipher_request_set_tfm(req, cipher); ++ ++ return err; ++} ++ ++static bool aspeed_rsa_need_fallback(struct akcipher_request *req) ++{ ++ struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req); ++ struct aspeed_rsa_ctx *ctx = akcipher_tfm_ctx(cipher); ++ ++ return ctx->key.n_sz > ASPEED_RSA_MAX_KEY_LEN; ++} ++ ++static int aspeed_rsa_handle_queue(struct aspeed_rsss_dev *rsss_dev, ++ struct akcipher_request *req) ++{ ++ if (aspeed_rsa_need_fallback(req)) { ++ RSSS_DBG(rsss_dev, "SW fallback\n"); ++ return aspeed_rsa_do_fallback(req); ++ } ++ ++ return crypto_transfer_akcipher_request_to_engine(rsss_dev->crypt_engine_rsa, req); ++} ++ ++static int aspeed_rsa_do_request(struct crypto_engine *engine, void *areq) ++{ ++ struct akcipher_request *req = akcipher_request_cast(areq); ++ struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req); ++ struct aspeed_rsa_ctx *ctx = akcipher_tfm_ctx(cipher); ++ struct aspeed_rsss_dev *rsss_dev = ctx->rsss_dev; ++ struct aspeed_engine_rsa *rsa_engine; ++ ++ rsa_engine = &rsss_dev->rsa_engine; ++ rsa_engine->req = req; ++ rsa_engine->flags |= CRYPTO_FLAGS_BUSY; ++ ++ return ctx->trigger(rsss_dev); ++} ++ ++static int aspeed_rsa_complete(struct aspeed_rsss_dev *rsss_dev, int err) ++{ ++ struct aspeed_engine_rsa *rsa_engine = &rsss_dev->rsa_engine; ++ struct akcipher_request *req = rsa_engine->req; ++ ++ rsa_engine->flags &= ~CRYPTO_FLAGS_BUSY; ++ ++ crypto_finalize_akcipher_request(rsss_dev->crypt_engine_rsa, req, err); ++ ++ return err; ++} ++ ++/* ++ * Copy Data to SRAM buffer for engine used. 
++ */ ++static void aspeed_rsa_sg_copy_to_buffer(struct aspeed_rsss_dev *rsss_dev, ++ void __iomem *buf, struct scatterlist *src, ++ size_t nbytes) ++{ ++ RSSS_DBG(rsss_dev, "src len:%zu\n", nbytes); ++ ++ memset(data_rev, 0, SRAM_BLOCK_SIZE); ++ memset(data, 0, SRAM_BLOCK_SIZE); ++ ++ scatterwalk_map_and_copy(data, src, 0, nbytes, 0); ++ ++ hexdump("data", data, nbytes); ++ for (int i = 0; i < nbytes; i++) ++ data_rev[nbytes - i - 1] = data[i]; ++ ++ /* align 8 bytes */ ++ memcpy_toio(buf, data_rev, (nbytes + 7) & ~(8 - 1)); ++} ++ ++/* ++ * Copy Exp/Mod to SRAM buffer for engine used. ++ * ++ * Params: ++ * - mode 0 : Exponential ++ * - mode 1 : Modulus ++ */ ++static int aspeed_rsa_ctx_copy(struct aspeed_rsss_dev *rsss_dev, void __iomem *dst, ++ const u8 *src, size_t nbytes, ++ enum aspeed_rsa_key_mode mode) ++{ ++ RSSS_DBG(rsss_dev, "nbytes:%zu, mode:%d\n", nbytes, mode); ++ ++ if (nbytes > ASPEED_RSA_MAX_KEY_LEN) ++ return -ENOMEM; ++ ++ memset(data, 0, SRAM_BLOCK_SIZE); ++ ++ /* Remove leading zeros */ ++ while (nbytes > 0 && src[0] == 0) { ++ src++; ++ nbytes--; ++ } ++ ++ for (int i = 0; i < nbytes; i++) ++ data[nbytes - i - 1] = src[i]; ++ ++ /* align 8 bytes */ ++ memcpy_toio(dst, data, (nbytes + 7) & ~(8 - 1)); ++ ++ return nbytes * 8; ++} ++ ++static int aspeed_rsa_transfer(struct aspeed_rsss_dev *rsss_dev) ++{ ++ struct aspeed_engine_rsa *rsa_engine = &rsss_dev->rsa_engine; ++ struct akcipher_request *req = rsa_engine->req; ++ struct scatterlist *out_sg = req->dst; ++ size_t nbytes = req->dst_len; ++ u8 data[SRAM_BLOCK_SIZE]; ++ u32 val; ++ ++ RSSS_DBG(rsss_dev, "nbytes:%zu\n", nbytes); ++ ++ /* Set SRAM access control - CPU */ ++ val = ast_rsss_read(rsss_dev, ASPEED_RSSS_CTRL); ++ ast_rsss_write(rsss_dev, val | SRAM_AHB_MODE_CPU, ASPEED_RSSS_CTRL); ++ ++ for (int i = 0; i < nbytes; i++) ++ data[nbytes - i - 1] = readb(rsa_engine->sram_data + i); ++ ++ scatterwalk_map_and_copy(data, out_sg, 0, nbytes, 1); ++ ++ return aspeed_rsa_complete(rsss_dev, 0); ++} ++ ++#ifdef RSSS_RSA_POLLING_MODE ++static int aspeed_rsa_wait_complete(struct aspeed_rsss_dev *rsss_dev) ++{ ++ struct aspeed_engine_rsa *rsa_engine = &rsss_dev->rsa_engine; ++ u32 sts; ++ int ret; ++ ++ ret = readl_poll_timeout(rsss_dev->regs + ASPEED_RSA_ENG_STS, sts, ++ ((sts & RSA_STS) == 0x0), ++ ASPEED_RSSS_POLLING_TIME, ++ ASPEED_RSSS_TIMEOUT * 10); ++ if (ret) { ++ dev_err(rsss_dev->dev, "RSA wrong engine status\n"); ++ return -EIO; ++ } ++ ++ ret = readl_poll_timeout(rsss_dev->regs + ASPEED_RSSS_INT_STS, sts, ++ ((sts & RSA_INT_DONE) == RSA_INT_DONE), ++ ASPEED_RSSS_POLLING_TIME, ++ ASPEED_RSSS_TIMEOUT); ++ if (ret) { ++ dev_err(rsss_dev->dev, "RSA wrong interrupt status\n"); ++ return -EIO; ++ } ++ ++ ast_rsss_write(rsss_dev, sts, ASPEED_RSSS_INT_STS); ++ ++ RSSS_DBG(rsss_dev, "irq sts:0x%x\n", sts); ++ ++ if (sts & RSA_INT_DONE) { ++ /* Stop RSA engine */ ++ ast_rsss_write(rsss_dev, 0, ASPEED_RSA_TRIGGER); ++ ++ if (rsa_engine->flags & CRYPTO_FLAGS_BUSY) ++ tasklet_schedule(&rsa_engine->done_task); ++ else ++ dev_err(rsss_dev->dev, "RSA no active requests.\n"); ++ } ++ ++ return 0; ++} ++#endif ++ ++static int aspeed_rsa_trigger(struct aspeed_rsss_dev *rsss_dev) ++{ ++ struct aspeed_engine_rsa *rsa_engine = &rsss_dev->rsa_engine; ++ struct akcipher_request *req = rsa_engine->req; ++ struct crypto_akcipher *cipher; ++ struct aspeed_rsa_ctx *ctx; ++ int ne, nm; ++ u32 val; ++ ++ RSSS_DBG(rsss_dev, "\n"); ++ ++ cipher = crypto_akcipher_reqtfm(req); ++ ctx = akcipher_tfm_ctx(cipher); ++ ++ if (!ctx->n || !ctx->n_sz) { 
++ dev_err(rsss_dev->dev, "%s: key n is not set\n", __func__); ++ return -EINVAL; ++ } ++ ++ /* Set SRAM access control - CPU */ ++ val = ast_rsss_read(rsss_dev, ASPEED_RSSS_CTRL); ++ ast_rsss_write(rsss_dev, val | SRAM_AHB_MODE_CPU, ASPEED_RSSS_CTRL); ++ ++ memset_io(rsa_engine->sram_exp, 0, SRAM_BLOCK_SIZE); ++ memset_io(rsa_engine->sram_mod, 0, SRAM_BLOCK_SIZE); ++ memset_io(rsa_engine->sram_data, 0, SRAM_BLOCK_SIZE); ++ ++ /* Copy source data to SRAM buffer */ ++ aspeed_rsa_sg_copy_to_buffer(rsss_dev, rsa_engine->sram_data, ++ req->src, req->src_len); ++ ++ nm = aspeed_rsa_ctx_copy(rsss_dev, rsa_engine->sram_mod, ctx->n, ++ ctx->n_sz, ASPEED_RSA_MOD_MODE); ++ ++ /* Set dst len as modulus size */ ++ req->dst_len = nm / 8; ++ ++ if (ctx->enc) { ++ if (!ctx->e || !ctx->e_sz) { ++ dev_err(rsss_dev->dev, "%s: key e is not set\n", ++ __func__); ++ return -EINVAL; ++ } ++ /* Copy key e to SRAM buffer */ ++ ne = aspeed_rsa_ctx_copy(rsss_dev, rsa_engine->sram_exp, ++ ctx->e, ctx->e_sz, ++ ASPEED_RSA_EXP_MODE); ++ } else { ++ if (!ctx->d || !ctx->d_sz) { ++ dev_err(rsss_dev->dev, "%s: key d is not set\n", ++ __func__); ++ return -EINVAL; ++ } ++ /* Copy key d to SRAM buffer */ ++ ne = aspeed_rsa_ctx_copy(rsss_dev, rsa_engine->sram_exp, ++ ctx->key.d, ctx->key.d_sz, ++ ASPEED_RSA_EXP_MODE); ++ } ++ ++ hexdump("exp", rsa_engine->sram_exp, ctx->e_sz); ++ hexdump("mod", rsa_engine->sram_mod, ctx->n_sz); ++ hexdump("data", rsa_engine->sram_data, req->src_len); ++ ++ rsa_engine->resume = aspeed_rsa_transfer; ++ ++ ast_rsss_write(rsss_dev, (ne << 16) + nm, ++ ASPEED_RSA_KEY_INFO); ++ ++ /* Set SRAM access control - Engine */ ++ val = ast_rsss_read(rsss_dev, ASPEED_RSSS_CTRL); ++ ast_rsss_write(rsss_dev, val & ~SRAM_AHB_MODE_CPU, ASPEED_RSSS_CTRL); ++ ++ /* Trigger RSA engines */ ++ ast_rsss_write(rsss_dev, RSA_TRIGGER, ASPEED_RSA_TRIGGER); ++ ++#ifdef RSSS_RSA_POLLING_MODE ++ return aspeed_rsa_wait_complete(rsss_dev); ++#else ++ return 0; ++#endif ++} ++ ++static int aspeed_rsa_enc(struct akcipher_request *req) ++{ ++ struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req); ++ struct aspeed_rsa_ctx *ctx = akcipher_tfm_ctx(cipher); ++ struct aspeed_rsss_dev *rsss_dev = ctx->rsss_dev; ++ ++ ctx->trigger = aspeed_rsa_trigger; ++ ctx->enc = 1; ++ ++ return aspeed_rsa_handle_queue(rsss_dev, req); ++} ++ ++static int aspeed_rsa_dec(struct akcipher_request *req) ++{ ++ struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req); ++ struct aspeed_rsa_ctx *ctx = akcipher_tfm_ctx(cipher); ++ struct aspeed_rsss_dev *rsss_dev = ctx->rsss_dev; ++ ++ ctx->trigger = aspeed_rsa_trigger; ++ ctx->enc = 0; ++ ++ return aspeed_rsa_handle_queue(rsss_dev, req); ++} ++ ++static u8 *aspeed_rsa_key_copy(u8 *src, size_t len) ++{ ++ return kmemdup(src, len, GFP_KERNEL); ++} ++ ++static int aspeed_rsa_set_n(struct aspeed_rsa_ctx *ctx, u8 *value, ++ size_t len) ++{ ++ ctx->n_sz = len; ++ ctx->n = aspeed_rsa_key_copy(value, len); ++ if (!ctx->n) ++ return -ENOMEM; ++ ++ return 0; ++} ++ ++static int aspeed_rsa_set_e(struct aspeed_rsa_ctx *ctx, u8 *value, ++ size_t len) ++{ ++ ctx->e_sz = len; ++ ctx->e = aspeed_rsa_key_copy(value, len); ++ if (!ctx->e) ++ return -ENOMEM; ++ ++ return 0; ++} ++ ++static int aspeed_rsa_set_d(struct aspeed_rsa_ctx *ctx, u8 *value, ++ size_t len) ++{ ++ ctx->d_sz = len; ++ ctx->d = aspeed_rsa_key_copy(value, len); ++ if (!ctx->d) ++ return -ENOMEM; ++ ++ return 0; ++} ++ ++static void aspeed_rsa_key_free(struct aspeed_rsa_ctx *ctx) ++{ ++ kfree_sensitive(ctx->n); ++ kfree_sensitive(ctx->e); ++ 
kfree_sensitive(ctx->d); ++ ctx->n_sz = 0; ++ ctx->e_sz = 0; ++ ctx->d_sz = 0; ++} ++ ++static int aspeed_rsa_setkey(struct crypto_akcipher *tfm, const void *key, ++ unsigned int keylen, int priv) ++{ ++ struct aspeed_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); ++ struct aspeed_rsss_dev *rsss_dev = ctx->rsss_dev; ++ int ret; ++ ++ RSSS_DBG(rsss_dev, "\n"); ++ ++ if (priv) ++ ret = rsa_parse_priv_key(&ctx->key, key, keylen); ++ else ++ ret = rsa_parse_pub_key(&ctx->key, key, keylen); ++ ++ if (ret) { ++ dev_err(rsss_dev->dev, "rsss parse key failed, ret:0x%x\n", ++ ret); ++ return ret; ++ } ++ ++ /* Aspeed engine supports up to 4096 bits, ++ * Use software fallback instead. ++ */ ++ if (ctx->key.n_sz > ASPEED_RSA_MAX_KEY_LEN) ++ return 0; ++ ++ hexdump("n", (u8 *)ctx->key.n, ctx->key.n_sz); ++ ret = aspeed_rsa_set_n(ctx, (u8 *)ctx->key.n, ctx->key.n_sz); ++ if (ret) ++ goto err; ++ ++ hexdump("e", (u8 *)ctx->key.e, ctx->key.e_sz); ++ ret = aspeed_rsa_set_e(ctx, (u8 *)ctx->key.e, ctx->key.e_sz); ++ if (ret) ++ goto err; ++ ++ if (priv) { ++ hexdump("d", (u8 *)ctx->key.d, ctx->key.d_sz); ++ ret = aspeed_rsa_set_d(ctx, (u8 *)ctx->key.d, ctx->key.d_sz); ++ if (ret) ++ goto err; ++ } ++ ++ return 0; ++ ++err: ++ dev_err(rsss_dev->dev, "rsss set key failed\n"); ++ aspeed_rsa_key_free(ctx); ++ ++ return ret; ++} ++ ++static int aspeed_rsa_set_pub_key(struct crypto_akcipher *tfm, ++ const void *key, ++ unsigned int keylen) ++{ ++ struct aspeed_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); ++ int ret; ++ ++ ret = crypto_akcipher_set_pub_key(ctx->fallback_tfm, key, keylen); ++ if (ret) ++ return ret; ++ ++ return aspeed_rsa_setkey(tfm, key, keylen, 0); ++} ++ ++static int aspeed_rsa_set_priv_key(struct crypto_akcipher *tfm, ++ const void *key, ++ unsigned int keylen) ++{ ++ struct aspeed_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); ++ int ret; ++ ++ ret = crypto_akcipher_set_priv_key(ctx->fallback_tfm, key, keylen); ++ if (ret) ++ return ret; ++ ++ return aspeed_rsa_setkey(tfm, key, keylen, 1); ++} ++ ++static unsigned int aspeed_rsa_max_size(struct crypto_akcipher *tfm) ++{ ++ struct aspeed_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); ++ ++ if (ctx->key.n_sz > ASPEED_RSA_MAX_KEY_LEN) ++ return crypto_akcipher_maxsize(ctx->fallback_tfm); ++ ++ return ctx->n_sz; ++} ++ ++static int aspeed_rsa_init_tfm(struct crypto_akcipher *tfm) ++{ ++ struct aspeed_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); ++ struct akcipher_alg *alg = crypto_akcipher_alg(tfm); ++ const char *name = crypto_tfm_alg_name(&tfm->base); ++ struct aspeed_rsss_alg *rsa_alg; ++ ++ rsa_alg = container_of(alg, struct aspeed_rsss_alg, alg.akcipher.base); ++ ++ ctx->rsss_dev = rsa_alg->rsss_dev; ++ ++ ctx->fallback_tfm = crypto_alloc_akcipher(name, 0, CRYPTO_ALG_ASYNC | ++ CRYPTO_ALG_NEED_FALLBACK); ++ if (IS_ERR(ctx->fallback_tfm)) { ++ dev_err(ctx->rsss_dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n", ++ name, PTR_ERR(ctx->fallback_tfm)); ++ return PTR_ERR(ctx->fallback_tfm); ++ } ++ ++ return 0; ++} ++ ++static void aspeed_rsa_exit_tfm(struct crypto_akcipher *tfm) ++{ ++ struct aspeed_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); ++ ++ crypto_free_akcipher(ctx->fallback_tfm); ++} ++ ++struct aspeed_rsss_alg aspeed_rsss_algs_rsa = { ++ .type = ASPEED_ALGO_TYPE_AKCIPHER, ++ .alg.akcipher.base = { ++ .encrypt = aspeed_rsa_enc, ++ .decrypt = aspeed_rsa_dec, ++ .sign = aspeed_rsa_dec, ++ .verify = aspeed_rsa_enc, ++ .set_pub_key = aspeed_rsa_set_pub_key, ++ .set_priv_key = aspeed_rsa_set_priv_key, ++ .max_size = aspeed_rsa_max_size, ++ .init = aspeed_rsa_init_tfm, ++ .exit = 
aspeed_rsa_exit_tfm, ++ .base = { ++ .cra_name = "rsa", ++ .cra_driver_name = "aspeed-rsa", ++ .cra_priority = 300, ++ .cra_flags = CRYPTO_ALG_TYPE_AKCIPHER | ++ CRYPTO_ALG_ASYNC | ++ CRYPTO_ALG_KERN_DRIVER_ONLY | ++ CRYPTO_ALG_NEED_FALLBACK, ++ .cra_module = THIS_MODULE, ++ .cra_ctxsize = sizeof(struct aspeed_rsa_ctx), ++ }, ++ }, ++ .alg.akcipher.op = { ++ .do_one_request = aspeed_rsa_do_request, ++ }, ++}; ++ ++static void aspeed_rsa_done_task(unsigned long data) ++{ ++ struct aspeed_rsss_dev *rsss_dev = (struct aspeed_rsss_dev *)data; ++ struct aspeed_engine_rsa *rsa_engine = &rsss_dev->rsa_engine; ++ ++ (void)rsa_engine->resume(rsss_dev); ++} ++ ++void aspeed_rsss_rsa_exit(struct aspeed_rsss_dev *rsss_dev) ++{ ++ struct aspeed_engine_rsa *rsa_engine = &rsss_dev->rsa_engine; ++ ++ crypto_engine_exit(rsss_dev->crypt_engine_rsa); ++ tasklet_kill(&rsa_engine->done_task); ++} ++ ++int aspeed_rsss_rsa_init(struct aspeed_rsss_dev *rsss_dev) ++{ ++ struct aspeed_engine_rsa *rsa_engine; ++ u32 val; ++ int rc; ++ ++ rc = reset_control_deassert(rsss_dev->reset_rsa); ++ if (rc) { ++ dev_err(rsss_dev->dev, "Deassert RSA reset failed\n"); ++ goto end; ++ } ++ ++ rsa_engine = &rsss_dev->rsa_engine; ++ ++ /* Initialize crypto hardware engine structure for RSA */ ++ rsss_dev->crypt_engine_rsa = crypto_engine_alloc_init(rsss_dev->dev, true); ++ if (!rsss_dev->crypt_engine_rsa) { ++ rc = -ENOMEM; ++ goto end; ++ } ++ ++ rc = crypto_engine_start(rsss_dev->crypt_engine_rsa); ++ if (rc) ++ goto err_engine_rsa_start; ++ ++ tasklet_init(&rsa_engine->done_task, aspeed_rsa_done_task, ++ (unsigned long)rsss_dev); ++ ++ rsa_engine->sram_exp = rsss_dev->regs + SRAM_OFFSET_EXP; ++ rsa_engine->sram_mod = rsss_dev->regs + SRAM_OFFSET_MOD; ++ rsa_engine->sram_data = rsss_dev->regs + SRAM_OFFSET_DATA; ++ ++ /* Set SRAM for RSA operation */ ++ ast_rsss_write(rsss_dev, RSA_OPERATION, ASPEED_RSSS_CTRL); ++ ++ /* Self-test */ ++ rc = aspeed_rsa_self_test(rsss_dev); ++ if (rc) ++ goto err_engine_rsa_start; ++ ++ /* Enable RSA interrupt */ ++ val = ast_rsss_read(rsss_dev, ASPEED_RSSS_INT_EN); ++ ast_rsss_write(rsss_dev, val | RSA_INT_EN, ASPEED_RSSS_INT_EN); ++ ++ dev_info(rsss_dev->dev, "Aspeed RSSS RSA initialized\n"); ++ ++ return 0; ++ ++err_engine_rsa_start: ++ crypto_engine_exit(rsss_dev->crypt_engine_rsa); ++end: ++ return rc; ++} +diff --git a/drivers/crypto/aspeed/aspeed-rsss.c b/drivers/crypto/aspeed/aspeed-rsss.c +--- a/drivers/crypto/aspeed/aspeed-rsss.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/crypto/aspeed/aspeed-rsss.c 2025-12-23 10:16:21.140032401 +0000 +@@ -0,0 +1,188 @@ ++// SPDX-License-Identifier: GPL-2.0+ ++/* ++ * Copyright 2023 Aspeed Technology Inc. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include "aspeed-rsss.h" ++ ++static struct aspeed_rsss_alg *aspeed_rsss_algs[] = { ++ &aspeed_rsss_algs_rsa, ++ &aspeed_rsss_algs_sha3_224, ++ &aspeed_rsss_algs_sha3_256, ++ &aspeed_rsss_algs_sha3_384, ++ &aspeed_rsss_algs_sha3_512, ++}; ++ ++static void aspeed_rsss_register(struct aspeed_rsss_dev *rsss_dev) ++{ ++ char *cra_name; ++ int rc; ++ ++ for (int i = 0; i < ARRAY_SIZE(aspeed_rsss_algs); i++) { ++ aspeed_rsss_algs[i]->rsss_dev = rsss_dev; ++ if (aspeed_rsss_algs[i]->type == ASPEED_ALGO_TYPE_AKCIPHER) { ++ rc = crypto_engine_register_akcipher(&aspeed_rsss_algs[i]->alg.akcipher); ++ cra_name = aspeed_rsss_algs[i]->alg.akcipher.base.base.cra_name; ++ ++ } else if (aspeed_rsss_algs[i]->type == ASPEED_ALGO_TYPE_AHASH) { ++ rc = crypto_engine_register_ahash(&aspeed_rsss_algs[i]->alg.ahash); ++ cra_name = aspeed_rsss_algs[i]->alg.ahash.base.halg.base.cra_name; ++ } ++ ++ if (rc) ++ dev_warn(rsss_dev->dev, "Failed to register [%d] %s(0x%x)\n", i, cra_name, rc); ++ } ++} ++ ++static void aspeed_rsss_unregister(struct aspeed_rsss_dev *rsss_dev) ++{ ++ for (int i = 0; i < ARRAY_SIZE(aspeed_rsss_algs); i++) { ++ if (aspeed_rsss_algs[i]->type == ASPEED_ALGO_TYPE_AKCIPHER) ++ crypto_engine_unregister_akcipher(&aspeed_rsss_algs[i]->alg.akcipher); ++ ++ else if (aspeed_rsss_algs[i]->type == ASPEED_ALGO_TYPE_AHASH) ++ crypto_engine_unregister_ahash(&aspeed_rsss_algs[i]->alg.ahash); ++ } ++} ++ ++/* RSSS interrupt service routine. */ ++static irqreturn_t aspeed_rsss_irq(int irq, void *dev) ++{ ++ struct aspeed_rsss_dev *rsss_dev = (struct aspeed_rsss_dev *)dev; ++ struct aspeed_engine_sha3 *sha3_engine = &rsss_dev->sha3_engine; ++ struct aspeed_engine_rsa *rsa_engine = &rsss_dev->rsa_engine; ++ u32 sts; ++ ++ sts = ast_rsss_read(rsss_dev, ASPEED_RSSS_INT_STS); ++ ast_rsss_write(rsss_dev, sts, ASPEED_RSSS_INT_STS); ++ ++ RSSS_DBG(rsss_dev, "irq sts:0x%x\n", sts); ++ ++ if (sts & RSA_INT_DONE) { ++ /* Stop RSA engine */ ++ ast_rsss_write(rsss_dev, 0, ASPEED_RSA_TRIGGER); ++ ++ if (rsa_engine->flags & CRYPTO_FLAGS_BUSY) ++ tasklet_schedule(&rsa_engine->done_task); ++ else ++ dev_err(rsss_dev->dev, "RSA no active requests.\n"); ++ } ++ ++ if (sts & SHA3_INT_DONE) { ++ if (sha3_engine->flags & CRYPTO_FLAGS_BUSY) ++ tasklet_schedule(&sha3_engine->done_task); ++ else ++ dev_err(rsss_dev->dev, "SHA3 no active requests.\n"); ++ } ++ ++ return IRQ_HANDLED; ++} ++ ++static const struct of_device_id aspeed_rsss_of_matches[] = { ++ { .compatible = "aspeed,ast2700-rsss", }, ++ {}, ++}; ++ ++static int aspeed_rsss_probe(struct platform_device *pdev) ++{ ++ struct aspeed_rsss_dev *rsss_dev; ++ struct device *dev = &pdev->dev; ++ int rc; ++ ++ rsss_dev = devm_kzalloc(dev, sizeof(struct aspeed_rsss_dev), ++ GFP_KERNEL); ++ if (!rsss_dev) ++ return -ENOMEM; ++ ++ rsss_dev->dev = dev; ++ ++ platform_set_drvdata(pdev, rsss_dev); ++ ++ rsss_dev->regs = devm_platform_ioremap_resource(pdev, 0); ++ if (IS_ERR(rsss_dev->regs)) ++ return PTR_ERR(rsss_dev->regs); ++ ++ /* Get irq number and register it */ ++ rsss_dev->irq = platform_get_irq(pdev, 0); ++ if (rsss_dev->irq < 0) ++ return -ENXIO; ++ ++ rc = devm_request_irq(dev, rsss_dev->irq, aspeed_rsss_irq, 0, ++ dev_name(dev), rsss_dev); ++ if (rc) { ++ dev_err(dev, "Failed to request irq.\n"); ++ return rc; ++ } ++ ++ rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); ++ if (rc) { ++ dev_warn(&pdev->dev, "No suitable DMA available\n"); ++ return rc; ++ } ++ ++ rsss_dev->clk = devm_clk_get_enabled(dev, NULL); ++ if 
(IS_ERR(rsss_dev->clk)) { ++ dev_err(dev, "Failed to get rsss clk\n"); ++ return PTR_ERR(rsss_dev->clk); ++ } ++ ++ rsss_dev->reset_rsa = devm_reset_control_get(dev, "rsa"); ++ if (IS_ERR(rsss_dev->reset_rsa)) { ++ dev_err(dev, "Failed to get rsa reset\n"); ++ return PTR_ERR(rsss_dev->reset_rsa); ++ } ++ ++ rsss_dev->reset_sha3 = devm_reset_control_get(dev, "sha3"); ++ if (IS_ERR(rsss_dev->reset_sha3)) { ++ dev_err(dev, "Failed to get sha3 reset\n"); ++ return PTR_ERR(rsss_dev->reset_sha3); ++ } ++ ++ rc = aspeed_rsss_rsa_init(rsss_dev); ++ if (rc) { ++ dev_err(dev, "RSA init failed\n"); ++ return rc; ++ } ++ ++ rc = aspeed_rsss_sha3_init(rsss_dev); ++ if (rc) { ++ dev_err(dev, "SHA3 init failed\n"); ++ return rc; ++ } ++ ++ aspeed_rsss_register(rsss_dev); ++ ++ dev_info(dev, "Aspeed RSSS Hardware Accelerator successfully registered\n"); ++ ++ return 0; ++} ++ ++static void aspeed_rsss_remove(struct platform_device *pdev) ++{ ++ struct aspeed_rsss_dev *rsss_dev = platform_get_drvdata(pdev); ++ ++ aspeed_rsss_unregister(rsss_dev); ++ aspeed_rsss_rsa_exit(rsss_dev); ++ aspeed_rsss_sha3_exit(rsss_dev); ++} ++ ++MODULE_DEVICE_TABLE(of, aspeed_rsss_of_matches); ++ ++static struct platform_driver aspeed_rsss_driver = { ++ .probe = aspeed_rsss_probe, ++ .remove = aspeed_rsss_remove, ++ .driver = { ++ .name = KBUILD_MODNAME, ++ .of_match_table = aspeed_rsss_of_matches, ++ }, ++}; ++ ++module_platform_driver(aspeed_rsss_driver); ++ ++MODULE_AUTHOR("Neal Liu "); ++MODULE_DESCRIPTION("ASPEED RSSS driver for multiple cryptographic engines"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/crypto/aspeed/aspeed-rsss.h b/drivers/crypto/aspeed/aspeed-rsss.h +--- a/drivers/crypto/aspeed/aspeed-rsss.h 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/crypto/aspeed/aspeed-rsss.h 2025-12-23 10:16:21.141032385 +0000 +@@ -0,0 +1,275 @@ ++/* SPDX-License-Identifier: GPL-2.0+ */ ++ ++#ifndef __ASPEED_RSSS_H__ ++#define __ASPEED_RSSS_H__ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG ++#define RSSS_DBG(d, fmt, ...) \ ++ dev_info((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__) ++#else ++#define RSSS_DBG(d, fmt, ...) 
\ ++ dev_dbg((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__) ++#endif ++ ++/***************************** ++ * * ++ * RSSS register definitions * ++ * * ++ * ***************************/ ++#define ASPEED_RSSS_INT_STS 0xc00 /* RSSS interrupt status */ ++#define ASPEED_RSSS_INT_EN 0xc04 /* RSSS interrupt enable */ ++#define ASPEED_RSSS_CTRL 0xc08 /* RSSS generic control */ ++#define ASPEED_RSA_TRIGGER 0xe00 /* RSA Engine Control: trigger */ ++#define ASPEED_RSA_KEY_INFO 0xe08 /* RSA Exp/Mod Key Length (Bits) */ ++#define ASPEED_RSA_ENG_STS 0xe0c /* RSA Engine Status */ ++ ++#define ASPEED_SHA3_CMD 0xe80 ++#define ASPEED_SHA3_SRC_LO 0xe84 ++#define ASPEED_SHA3_SRC_HI 0xe88 ++#define ASPEED_SHA3_SRC_LEN 0xe8c ++#define ASPEED_SHA3_DST_LO 0xe90 ++#define ASPEED_SHA3_DST_HI 0xe94 ++#define ASPEED_SHA3_BUSY_STS 0xe98 ++#define ASPEED_SHA3_ENG_STS 0xe9c ++ ++/* RSSS interrupt status */ ++#define SM4_INT_DONE BIT(3) ++#define SM3_INT_DONE BIT(2) ++#define SHA3_INT_DONE BIT(1) ++#define RSA_INT_DONE BIT(0) ++ ++/* RSSS interrupt enable */ ++#define SM4_INT_EN BIT(3) ++#define SM3_INT_EN BIT(2) ++#define SHA3_INT_EN BIT(1) ++#define RSA_INT_EN BIT(0) ++ ++/* RSSS generic control */ ++#define RSA_OPERATION (BIT(18) | BIT(19)) ++#define SRAM_AHB_MODE_CPU BIT(16) ++#define SRAM_AHB_MODE_ENGINE 0x0 ++#define SRAM_BUFF_PD (BIT(5) | BIT(4)) ++#define SM4_DISABLE BIT(3) ++#define SM3_DISABLE BIT(2) ++#define SHA3_DISABLE BIT(1) ++ ++/* RSA trigger */ ++#define RSA_TRIGGER BIT(0) ++ ++/* RSA key len */ ++#define RSA_E_BITS_LEN(x) ((x) << 16) ++#define RSA_M_BITS_LEN(x) (x) ++ ++#define RSA_STS (BIT(0) | BIT(1)) ++ ++/* RSA SRAM */ ++#define SRAM_OFFSET_EXP 0x0 ++#define SRAM_OFFSET_MOD 0x400 ++#define SRAM_OFFSET_DATA 0x800 ++#define SRAM_BLOCK_SIZE 0x400 ++ ++#define ASPEED_RSA_MAX_KEY_LEN 512 /* RSA maximum key length (Bytes) */ ++ ++#define CRYPTO_FLAGS_BUSY BIT(1) ++ ++/* SHA3 command */ ++#define SHA3_CMD_TRIG BIT(31) ++#define SHA3_CMD_MODE_224 (0x0 << 28) ++#define SHA3_CMD_MODE_256 (0x1 << 28) ++#define SHA3_CMD_MODE_384 (0x2 << 28) ++#define SHA3_CMD_MODE_512 (0x3 << 28) ++#define SHA3_CMD_MODE_S128 (0x4 << 28) ++#define SHA3_CMD_MODE_S256 (0x5 << 28) ++#define SHA3_CMD_HW_PAD BIT(27) ++#define SHA3_CMD_ACC_FINAL BIT(26) ++#define SHA3_CMD_ACC BIT(25) ++#define SHA3_CMD_SG_MODE BIT(24) ++#define SHA3_CMD_IN_RST BIT(21) ++#define SHA3_CMD_OUT_RST BIT(20) ++#define SHA3_CMD_OUT_LEN(x) ((x) & 0x1ffff) ++ ++#define SHA3_FLAGS_SHA224 BIT(0) ++#define SHA3_FLAGS_SHA256 BIT(1) ++#define SHA3_FLAGS_SHA384 BIT(2) ++#define SHA3_FLAGS_SHA512 BIT(3) ++#define SHA3_FLAGS_FINUP BIT(0xa) ++#define SHA3_FLAGS_MASK (0xff) ++ ++#define SHA3_STS BIT(0) ++ ++#define SG_LAST_LIST BIT(31) ++ ++#define SHA_OP_INIT BIT(0) ++#define SHA_OP_UPDATE BIT(1) ++#define SHA_OP_FINAL BIT(2) ++ ++#define ASPEED_HASH_SRC_DMA_BUF_LEN 0xa000 ++ ++#define ASPEED_RSSS_POLLING_TIME 100 ++#define ASPEED_RSSS_TIMEOUT 100000 /* 100 ms */ ++ ++struct aspeed_rsss_dev; ++ ++typedef int (*aspeed_rsss_fn_t)(struct aspeed_rsss_dev *); ++ ++struct aspeed_sg_list { ++ __le64 phy_addr; ++ __le32 len; ++}; ++ ++struct aspeed_engine_rsa { ++ struct tasklet_struct done_task; ++ unsigned long flags; ++ struct akcipher_request *req; ++ ++ /* RSA input/output SRAM buffer */ ++ void __iomem *sram_exp; ++ void __iomem *sram_mod; ++ void __iomem *sram_data; ++ ++ /* callback func */ ++ aspeed_rsss_fn_t resume; ++}; ++ ++struct aspeed_engine_sha3 { ++ struct tasklet_struct done_task; ++ unsigned long flags; ++ struct ahash_request *req; ++ ++ /* Protects sha3 
engine operation enqueue in order */ ++ struct mutex queue_lock; ++ ++ /* input buffer for SG */ ++ void *ahash_src_addr; ++ dma_addr_t ahash_src_dma_addr; ++ ++ /* input buffer for remain */ ++ void *buffer_addr; ++ dma_addr_t buffer_dma_addr; ++ ++ /* output buffer */ ++ void *digest_addr; ++ dma_addr_t digest_dma_addr; ++ ++ dma_addr_t src_dma; ++ size_t src_length; ++ ++ /* callback func */ ++ aspeed_rsss_fn_t resume; ++ aspeed_rsss_fn_t dma_prepare; ++ ++ unsigned sg_mode:1; ++}; ++ ++struct aspeed_rsss_dev { ++ void __iomem *regs; ++ struct device *dev; ++ int irq; ++ struct clk *clk; ++ struct reset_control *reset_rsa; ++ struct reset_control *reset_sha3; ++ ++ struct crypto_engine *crypt_engine_rsa; ++ struct crypto_engine *crypt_engine_sha3; ++ ++ struct aspeed_engine_rsa rsa_engine; ++ struct aspeed_engine_sha3 sha3_engine; ++}; ++ ++enum aspeed_algo_type { ++ ASPEED_ALGO_TYPE_AKCIPHER, ++ ASPEED_ALGO_TYPE_AHASH, ++}; ++ ++struct aspeed_rsss_alg { ++ struct aspeed_rsss_dev *rsss_dev; ++ enum aspeed_algo_type type; ++ union { ++ struct akcipher_engine_alg akcipher; ++ struct ahash_engine_alg ahash; ++ } alg; ++}; ++ ++/* RSA related */ ++struct aspeed_rsa_ctx { ++ struct aspeed_rsss_dev *rsss_dev; ++ ++ struct rsa_key key; ++ int enc; ++ u8 *n; ++ u8 *e; ++ u8 *d; ++ size_t n_sz; ++ size_t e_sz; ++ size_t d_sz; ++ ++ aspeed_rsss_fn_t trigger; ++ ++ struct crypto_akcipher *fallback_tfm; ++}; ++ ++enum aspeed_rsa_key_mode { ++ ASPEED_RSA_EXP_MODE = 0, ++ ASPEED_RSA_MOD_MODE, ++ ASPEED_RSA_DATA_MODE, ++}; ++ ++/* Hash related */ ++struct aspeed_sha3_ctx { ++ struct aspeed_rsss_dev *rsss_dev; ++}; ++ ++struct aspeed_sha3_reqctx { ++ unsigned long flags; /* final update flag should no use */ ++ unsigned long op; /* final or update */ ++ u32 cmd; /* trigger cmd */ ++ ++ /* walk state */ ++ struct scatterlist *src_sg; ++ int src_nents; ++ unsigned int offset; /* offset in current sg */ ++ unsigned int total; /* per update length */ ++ ++ size_t digsize; ++ size_t blksize; ++ ++ /* remain data buffer */ ++ u8 buffer[SHA3_512_BLOCK_SIZE * 2]; ++ size_t bufcnt; /* buffer counter */ ++ ++ /* output buffer */ ++ u8 digest[SHA3_512_DIGEST_SIZE]; ++ u64 digcnt[2]; ++}; ++ ++/******************************************************************************/ ++ ++#define ast_rsss_write(rsss, val, offset) \ ++ writel((val), (rsss)->regs + (offset)) ++ ++#define ast_rsss_read(rsss, offset) \ ++ readl((rsss)->regs + (offset)) ++ ++int aspeed_rsss_rsa_init(struct aspeed_rsss_dev *rsss_dev); ++void aspeed_rsss_rsa_exit(struct aspeed_rsss_dev *rsss_dev); ++int aspeed_rsss_sha3_init(struct aspeed_rsss_dev *rsss_dev); ++void aspeed_rsss_sha3_exit(struct aspeed_rsss_dev *rsss_dev); ++ ++extern struct aspeed_rsss_alg aspeed_rsss_algs_rsa; ++extern struct aspeed_rsss_alg aspeed_rsss_algs_sha3_224; ++extern struct aspeed_rsss_alg aspeed_rsss_algs_sha3_256; ++extern struct aspeed_rsss_alg aspeed_rsss_algs_sha3_384; ++extern struct aspeed_rsss_alg aspeed_rsss_algs_sha3_512; ++ ++#endif /* __ASPEED_RSSS_H__ */ +diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig +--- a/drivers/edac/Kconfig 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/edac/Kconfig 2025-12-23 10:16:21.149032250 +0000 +@@ -520,6 +520,15 @@ + First, ECC must be configured in the bootloader. Then, this driver + will expose error counters via the EDAC kernel framework. 
+ ++config EDAC_AST2700 ++ tristate "Aspeed AST2700 BMC SoC" ++ depends on ARCH_ASPEED ++ help ++ Support for error detection and correction on the Aspeed AST2700 ++ ++ First, ECC must be configured in the bootloader. Then, this driver ++ will expose error counters via the EDAC kernel framework. ++ + config EDAC_BLUEFIELD + tristate "Mellanox BlueField Memory ECC" + depends on ARM64 && ((MELLANOX_PLATFORM && ACPI) || COMPILE_TEST) +diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c +--- a/drivers/edac/altera_edac.c 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/edac/altera_edac.c 2025-12-23 10:16:21.149032250 +0000 +@@ -128,7 +128,6 @@ + + ptemp = dma_alloc_coherent(mci->pdev, 16, &dma_handle, GFP_KERNEL); + if (!ptemp) { +- dma_free_coherent(mci->pdev, 16, ptemp, dma_handle); + edac_printk(KERN_ERR, EDAC_MC, + "Inject: Buffer Allocation error\n"); + return -ENOMEM; +diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c +--- a/drivers/edac/edac_mc_sysfs.c 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/edac/edac_mc_sysfs.c 2025-12-23 10:16:21.151032217 +0000 +@@ -305,6 +305,14 @@ + channel_dimm_label_show, channel_dimm_label_store, 10); + DEVICE_CHANNEL(ch11_dimm_label, S_IRUGO | S_IWUSR, + channel_dimm_label_show, channel_dimm_label_store, 11); ++DEVICE_CHANNEL(ch12_dimm_label, S_IRUGO | S_IWUSR, ++ channel_dimm_label_show, channel_dimm_label_store, 12); ++DEVICE_CHANNEL(ch13_dimm_label, S_IRUGO | S_IWUSR, ++ channel_dimm_label_show, channel_dimm_label_store, 13); ++DEVICE_CHANNEL(ch14_dimm_label, S_IRUGO | S_IWUSR, ++ channel_dimm_label_show, channel_dimm_label_store, 14); ++DEVICE_CHANNEL(ch15_dimm_label, S_IRUGO | S_IWUSR, ++ channel_dimm_label_show, channel_dimm_label_store, 15); + + /* Total possible dynamic DIMM Label attribute file table */ + static struct attribute *dynamic_csrow_dimm_attr[] = { +@@ -320,6 +328,10 @@ + &dev_attr_legacy_ch9_dimm_label.attr.attr, + &dev_attr_legacy_ch10_dimm_label.attr.attr, + &dev_attr_legacy_ch11_dimm_label.attr.attr, ++ &dev_attr_legacy_ch12_dimm_label.attr.attr, ++ &dev_attr_legacy_ch13_dimm_label.attr.attr, ++ &dev_attr_legacy_ch14_dimm_label.attr.attr, ++ &dev_attr_legacy_ch15_dimm_label.attr.attr, + NULL + }; + +@@ -348,6 +360,14 @@ + channel_ce_count_show, NULL, 10); + DEVICE_CHANNEL(ch11_ce_count, S_IRUGO, + channel_ce_count_show, NULL, 11); ++DEVICE_CHANNEL(ch12_ce_count, S_IRUGO, ++ channel_ce_count_show, NULL, 12); ++DEVICE_CHANNEL(ch13_ce_count, S_IRUGO, ++ channel_ce_count_show, NULL, 13); ++DEVICE_CHANNEL(ch14_ce_count, S_IRUGO, ++ channel_ce_count_show, NULL, 14); ++DEVICE_CHANNEL(ch15_ce_count, S_IRUGO, ++ channel_ce_count_show, NULL, 15); + + /* Total possible dynamic ce_count attribute file table */ + static struct attribute *dynamic_csrow_ce_count_attr[] = { +@@ -363,6 +383,10 @@ + &dev_attr_legacy_ch9_ce_count.attr.attr, + &dev_attr_legacy_ch10_ce_count.attr.attr, + &dev_attr_legacy_ch11_ce_count.attr.attr, ++ &dev_attr_legacy_ch12_ce_count.attr.attr, ++ &dev_attr_legacy_ch13_ce_count.attr.attr, ++ &dev_attr_legacy_ch14_ce_count.attr.attr, ++ &dev_attr_legacy_ch15_ce_count.attr.attr, + NULL + }; + +diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c +--- a/drivers/edac/i10nm_base.c 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/edac/i10nm_base.c 2025-12-23 10:16:21.152032200 +0000 +@@ -967,6 +967,15 @@ + return !!GET_BITFIELD(mcmtr, 2, 2); + } + ++static bool i10nm_channel_disabled(struct skx_imc *imc, int chan) ++{ ++ u32 mcmtr = I10NM_GET_MCMTR(imc, chan); ++ ++ 
edac_dbg(1, "mc%d ch%d mcmtr reg %x\n", imc->mc, chan, mcmtr); ++ ++ return (mcmtr == ~0 || GET_BITFIELD(mcmtr, 18, 18)); ++} ++ + static int i10nm_get_dimm_config(struct mem_ctl_info *mci, + struct res_config *cfg) + { +@@ -980,6 +989,11 @@ + if (!imc->mbase) + continue; + ++ if (i10nm_channel_disabled(imc, i)) { ++ edac_dbg(1, "mc%d ch%d is disabled.\n", imc->mc, i); ++ continue; ++ } ++ + ndimms = 0; + + if (res_cfg->type != GNR) +diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c +--- a/drivers/edac/synopsys_edac.c 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/edac/synopsys_edac.c 2025-12-23 10:16:21.155032150 +0000 +@@ -332,20 +332,26 @@ + #endif + }; + ++enum synps_platform_type { ++ ZYNQ, ++ ZYNQMP, ++ SYNPS, ++}; ++ + /** + * struct synps_platform_data - synps platform data structure. ++ * @platform: Identifies the target hardware platform + * @get_error_info: Get EDAC error info. + * @get_mtype: Get mtype. + * @get_dtype: Get dtype. +- * @get_ecc_state: Get ECC state. + * @get_mem_info: Get EDAC memory info + * @quirks: To differentiate IPs. + */ + struct synps_platform_data { ++ enum synps_platform_type platform; + int (*get_error_info)(struct synps_edac_priv *priv); + enum mem_type (*get_mtype)(const void __iomem *base); + enum dev_type (*get_dtype)(const void __iomem *base); +- bool (*get_ecc_state)(void __iomem *base); + #ifdef CONFIG_EDAC_DEBUG + u64 (*get_mem_info)(struct synps_edac_priv *priv); + #endif +@@ -720,51 +726,38 @@ + return dt; + } + +-/** +- * zynq_get_ecc_state - Return the controller ECC enable/disable status. +- * @base: DDR memory controller base address. +- * +- * Get the ECC enable/disable status of the controller. +- * +- * Return: true if enabled, otherwise false. +- */ +-static bool zynq_get_ecc_state(void __iomem *base) +-{ +- enum dev_type dt; +- u32 ecctype; +- +- dt = zynq_get_dtype(base); +- if (dt == DEV_UNKNOWN) +- return false; +- +- ecctype = readl(base + SCRUB_OFST) & SCRUB_MODE_MASK; +- if ((ecctype == SCRUB_MODE_SECDED) && (dt == DEV_X2)) +- return true; +- +- return false; +-} +- +-/** +- * zynqmp_get_ecc_state - Return the controller ECC enable/disable status. +- * @base: DDR memory controller base address. +- * +- * Get the ECC enable/disable status for the controller. +- * +- * Return: a ECC status boolean i.e true/false - enabled/disabled. 
+- */ +-static bool zynqmp_get_ecc_state(void __iomem *base) ++static bool get_ecc_state(struct synps_edac_priv *priv) + { ++ u32 ecctype, clearval; + enum dev_type dt; +- u32 ecctype; + +- dt = zynqmp_get_dtype(base); +- if (dt == DEV_UNKNOWN) +- return false; +- +- ecctype = readl(base + ECC_CFG0_OFST) & SCRUB_MODE_MASK; +- if ((ecctype == SCRUB_MODE_SECDED) && +- ((dt == DEV_X2) || (dt == DEV_X4) || (dt == DEV_X8))) +- return true; ++ if (priv->p_data->platform == ZYNQ) { ++ dt = zynq_get_dtype(priv->baseaddr); ++ if (dt == DEV_UNKNOWN) ++ return false; ++ ++ ecctype = readl(priv->baseaddr + SCRUB_OFST) & SCRUB_MODE_MASK; ++ if (ecctype == SCRUB_MODE_SECDED && dt == DEV_X2) { ++ clearval = ECC_CTRL_CLR_CE_ERR | ECC_CTRL_CLR_UE_ERR; ++ writel(clearval, priv->baseaddr + ECC_CTRL_OFST); ++ writel(0x0, priv->baseaddr + ECC_CTRL_OFST); ++ return true; ++ } ++ } else { ++ dt = zynqmp_get_dtype(priv->baseaddr); ++ if (dt == DEV_UNKNOWN) ++ return false; ++ ++ ecctype = readl(priv->baseaddr + ECC_CFG0_OFST) & SCRUB_MODE_MASK; ++ if (ecctype == SCRUB_MODE_SECDED && ++ (dt == DEV_X2 || dt == DEV_X4 || dt == DEV_X8)) { ++ clearval = readl(priv->baseaddr + ECC_CLR_OFST) | ++ ECC_CTRL_CLR_CE_ERR | ECC_CTRL_CLR_CE_ERRCNT | ++ ECC_CTRL_CLR_UE_ERR | ECC_CTRL_CLR_UE_ERRCNT; ++ writel(clearval, priv->baseaddr + ECC_CLR_OFST); ++ return true; ++ } ++ } + + return false; + } +@@ -934,18 +927,18 @@ + } + + static const struct synps_platform_data zynq_edac_def = { ++ .platform = ZYNQ, + .get_error_info = zynq_get_error_info, + .get_mtype = zynq_get_mtype, + .get_dtype = zynq_get_dtype, +- .get_ecc_state = zynq_get_ecc_state, + .quirks = 0, + }; + + static const struct synps_platform_data zynqmp_edac_def = { ++ .platform = ZYNQMP, + .get_error_info = zynqmp_get_error_info, + .get_mtype = zynqmp_get_mtype, + .get_dtype = zynqmp_get_dtype, +- .get_ecc_state = zynqmp_get_ecc_state, + #ifdef CONFIG_EDAC_DEBUG + .get_mem_info = zynqmp_get_mem_info, + #endif +@@ -957,10 +950,10 @@ + }; + + static const struct synps_platform_data synopsys_edac_def = { ++ .platform = SYNPS, + .get_error_info = zynqmp_get_error_info, + .get_mtype = zynqmp_get_mtype, + .get_dtype = zynqmp_get_dtype, +- .get_ecc_state = zynqmp_get_ecc_state, + .quirks = (DDR_ECC_INTR_SUPPORT | DDR_ECC_INTR_SELF_CLEAR + #ifdef CONFIG_EDAC_DEBUG + | DDR_ECC_DATA_POISON_SUPPORT +@@ -1390,10 +1383,6 @@ + if (!p_data) + return -ENODEV; + +- if (!p_data->get_ecc_state(baseaddr)) { +- edac_printk(KERN_INFO, EDAC_MC, "ECC not enabled\n"); +- return -ENXIO; +- } + + layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; + layers[0].size = SYNPS_EDAC_NR_CSROWS; +@@ -1413,6 +1402,12 @@ + priv = mci->pvt_info; + priv->baseaddr = baseaddr; + priv->p_data = p_data; ++ if (!get_ecc_state(priv)) { ++ edac_printk(KERN_INFO, EDAC_MC, "ECC not enabled\n"); ++ rc = -ENODEV; ++ goto free_edac_mc; ++ } ++ + spin_lock_init(&priv->reglock); + + mc_init(mci, pdev); +diff --git a/drivers/fsi/fsi-master-aspeed.c b/drivers/fsi/fsi-master-aspeed.c +--- a/drivers/fsi/fsi-master-aspeed.c 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/fsi/fsi-master-aspeed.c 2025-12-23 10:16:20.956035485 +0000 +@@ -3,6 +3,7 @@ + // FSI master driver for AST2600 + + #include ++#include + #include + #include + #include +@@ -24,9 +25,14 @@ + struct device *dev; + void __iomem *base; + struct clk *clk; ++ struct reset_control *rst; + struct gpio_desc *cfam_reset_gpio; + }; + ++struct aspeed_fsi_match_data { ++ bool sup_ahb_access; ++}; ++ + #define to_fsi_master_aspeed(m) \ + container_of(m, struct 
fsi_master_aspeed, master) + +@@ -60,6 +66,8 @@ + #define OPB1_READ_ORDER2 0x60 + + #define OPB_RETRY_COUNTER 0x64 ++#define OPB_ACCESS_CTRL_REG BIT(18) ++#define OPB_ACCESS_CPU_BRIDGE BIT(19) + + /* OPBn_STATUS */ + #define STATUS_HALFWORD_ACK BIT(0) +@@ -539,6 +547,7 @@ + struct fsi_master_aspeed *aspeed; + int rc, links, reg; + __be32 raw; ++ const struct aspeed_fsi_match_data *match_data; + + rc = tacoma_cabled_fsi_fixup(&pdev->dev); + if (rc) { +@@ -558,16 +567,22 @@ + goto err_free_aspeed; + } + ++ aspeed->rst = devm_reset_control_get_shared(&pdev->dev, NULL); ++ if (IS_ERR(aspeed->rst)) ++ dev_warn(aspeed->dev, "couldn't get reset\n"); ++ else ++ reset_control_deassert(aspeed->rst); ++ + aspeed->clk = devm_clk_get(aspeed->dev, NULL); + if (IS_ERR(aspeed->clk)) { + dev_err(aspeed->dev, "couldn't get clock\n"); + rc = PTR_ERR(aspeed->clk); +- goto err_free_aspeed; ++ goto err_reset; + } + rc = clk_prepare_enable(aspeed->clk); + if (rc) { + dev_err(aspeed->dev, "couldn't enable clock\n"); +- goto err_free_aspeed; ++ goto err_reset; + } + + rc = setup_cfam_reset(aspeed); +@@ -580,7 +595,12 @@ + aspeed->base + OPB_IRQ_MASK); + + /* TODO: determine an appropriate value */ +- writel(0x10, aspeed->base + OPB_RETRY_COUNTER); ++ match_data = of_device_get_match_data(&pdev->dev); ++ if (match_data->sup_ahb_access) ++ writel(0x10 | OPB_ACCESS_CTRL_REG | OPB_ACCESS_CPU_BRIDGE, ++ aspeed->base + OPB_RETRY_COUNTER); ++ else ++ writel(0x10, aspeed->base + OPB_RETRY_COUNTER); + + writel(ctrl_base, aspeed->base + OPB_CTRL_BASE); + writel(fsi_base, aspeed->base + OPB_FSI_BASE); +@@ -641,6 +661,9 @@ + + err_release: + clk_disable_unprepare(aspeed->clk); ++err_reset: ++ if (!IS_ERR(aspeed->rst)) ++ reset_control_assert(aspeed->rst); + err_free_aspeed: + kfree(aspeed); + return rc; +@@ -652,10 +675,21 @@ + + fsi_master_unregister(&aspeed->master); + clk_disable_unprepare(aspeed->clk); ++ if (!IS_ERR(aspeed->rst)) ++ reset_control_assert(aspeed->rst); + } + ++static const struct aspeed_fsi_match_data ast2600_match_data = { ++ .sup_ahb_access = 0, ++}; ++ ++static const struct aspeed_fsi_match_data ast2700_match_data = { ++ .sup_ahb_access = 1, ++}; ++ + static const struct of_device_id fsi_master_aspeed_match[] = { +- { .compatible = "aspeed,ast2600-fsi-master" }, ++ { .compatible = "aspeed,ast2600-fsi-master", .data = &ast2600_match_data }, ++ { .compatible = "aspeed,ast2700-fsi-master", .data = &ast2700_match_data }, + { }, + }; + MODULE_DEVICE_TABLE(of, fsi_master_aspeed_match); +diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig +--- a/drivers/gpio/Kconfig 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/gpio/Kconfig 2025-12-23 10:16:09.425228786 +0000 +@@ -180,6 +180,14 @@ + help + Say Y here to support Aspeed AST2500 SGPIO functionality. + ++config GPIO_ASPEED_LTPI ++ bool "Aspeed LTPI GPIO support" ++ depends on (ARCH_ASPEED || COMPILE_TEST) && OF_GPIO ++ select GPIO_GENERIC ++ select GPIOLIB_IRQCHIP ++ help ++ Say Y here to support Aspeed AST2700 LTPI GPIO functionality. 
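In the FSI master changes above, the AST2700 match data enables AHB access by OR-ing OPB_ACCESS_CTRL_REG and OPB_ACCESS_CPU_BRIDGE into the same register that carries the 0x10 retry count, while the AST2600 path writes the bare retry count. A small standalone sketch (not part of the patch) that simply reproduces the two values written to OPB_RETRY_COUNTER, using the bit positions from the hunk above:

/* Standalone illustration -- not part of the patch. */
#include <stdio.h>

#define BIT(n)			(1u << (n))
#define OPB_ACCESS_CTRL_REG	BIT(18)
#define OPB_ACCESS_CPU_BRIDGE	BIT(19)

static unsigned int opb_retry_counter_val(int sup_ahb_access)
{
	unsigned int val = 0x10;	/* retry count used by both SoCs */

	if (sup_ahb_access)		/* ast2700_match_data.sup_ahb_access */
		val |= OPB_ACCESS_CTRL_REG | OPB_ACCESS_CPU_BRIDGE;

	return val;
}

int main(void)
{
	printf("ast2600: 0x%08x\n", opb_retry_counter_val(0));	/* 0x00000010 */
	printf("ast2700: 0x%08x\n", opb_retry_counter_val(1));	/* 0x000c0010 */
	return 0;
}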
++ + config GPIO_ATH79 + tristate "Atheros AR71XX/AR724X/AR913X GPIO support" + default y if ATH79 +diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile +--- a/drivers/gpio/Makefile 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/gpio/Makefile 2025-12-23 10:16:13.585159025 +0000 +@@ -36,6 +36,7 @@ + obj-$(CONFIG_GPIO_ARIZONA) += gpio-arizona.o + obj-$(CONFIG_GPIO_ASPEED) += gpio-aspeed.o + obj-$(CONFIG_GPIO_ASPEED_SGPIO) += gpio-aspeed-sgpio.o ++obj-$(CONFIG_GPIO_ASPEED_LTPI) += gpio-aspeed-ltpi.o + obj-$(CONFIG_GPIO_ATH79) += gpio-ath79.o + obj-$(CONFIG_GPIO_BCM_KONA) += gpio-bcm-kona.o + obj-$(CONFIG_GPIO_BCM_XGS_IPROC) += gpio-xgs-iproc.o +diff --git a/drivers/gpio/gpio-aspeed-ltpi.c b/drivers/gpio/gpio-aspeed-ltpi.c +--- a/drivers/gpio/gpio-aspeed-ltpi.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/gpio/gpio-aspeed-ltpi.c 2025-12-23 10:16:21.060033742 +0000 +@@ -0,0 +1,469 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright (c) 2024 ASPEED ++ * ++ * Author: Billy Tsai ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define LTPI_GPIO_IRQ_STS_BASE 0x200 ++#define LTPI_GPIO_IRQ_STS_OFFSET(x) (LTPI_GPIO_IRQ_STS_BASE + (x) * 0x4) ++#define LTPI_GPIO_CTRL_REG_BASE 0x0 ++#define LTPI_GPIO_CTRL_REG_OFFSET(x) (LTPI_GPIO_CTRL_REG_BASE + (x) * 0x4) ++#define LTPI_GPIO_OUT_DATA BIT(0) ++#define LTPI_GPIO_IRQ_EN BIT(2) ++#define LTPI_GPIO_IRQ_TYPE0 BIT(3) ++#define LTPI_GPIO_IRQ_TYPE1 BIT(4) ++#define LTPI_GPIO_IRQ_TYPE2 BIT(5) ++#define LTPI_GPIO_RST_TOLERANCE BIT(6) ++#define LTPI_GPIO_IRQ_STS BIT(12) ++#define LTPI_GPIO_IN_DATA BIT(13) ++ ++static inline u32 field_get(u32 _mask, u32 _val) ++{ ++ return (((_val) & (_mask)) >> (ffs(_mask) - 1)); ++} ++ ++static inline u32 field_prep(u32 _mask, u32 _val) ++{ ++ return (((_val) << (ffs(_mask) - 1)) & (_mask)); ++} ++ ++static inline void ast_write_bits(void __iomem *addr, u32 mask, u32 val) ++{ ++ iowrite32((ioread32(addr) & ~(mask)) | field_prep(mask, val), addr); ++} ++ ++static inline void ast_clr_bits(void __iomem *addr, u32 mask) ++{ ++ iowrite32((ioread32(addr) & ~(mask)), addr); ++} ++ ++struct aspeed_ltpi_gpio { ++ struct gpio_chip chip; ++ struct device *dev; ++ raw_spinlock_t lock; ++ void __iomem *base; ++ int irq; ++}; ++ ++static int aspeed_ltpi_gpio_init_valid_mask(struct gpio_chip *gc, ++ unsigned long *valid_mask, ++ unsigned int ngpios) ++{ ++ bitmap_set(valid_mask, 0, ngpios); ++ return 0; ++} ++ ++static void aspeed_ltpi_gpio_irq_init_valid_mask(struct gpio_chip *gc, ++ unsigned long *valid_mask, ++ unsigned int ngpios) ++{ ++ unsigned int i; ++ ++ /* input GPIOs are even bits */ ++ for (i = 0; i < ngpios; i++) { ++ if (i % 2) ++ clear_bit(i, valid_mask); ++ } ++} ++ ++static int aspeed_ltpi_gpio_irq_init_hw(struct gpio_chip *gc) ++{ ++ struct aspeed_ltpi_gpio *gpio = gpiochip_get_data(gc); ++ void __iomem *addr; ++ unsigned long flags; ++ int i; ++ ++ raw_spin_lock_irqsave(&gpio->lock, flags); ++ ++ for (i = 0; i < (gc->ngpio >> 1); i++) { ++ addr = gpio->base + LTPI_GPIO_CTRL_REG_OFFSET(i); ++ ast_clr_bits(addr, LTPI_GPIO_IRQ_EN | LTPI_GPIO_IRQ_TYPE0 | ++ LTPI_GPIO_IRQ_TYPE1 | ++ LTPI_GPIO_IRQ_TYPE2); ++ ast_write_bits(addr, LTPI_GPIO_IRQ_STS, 1); ++ } ++ ++ raw_spin_unlock_irqrestore(&gpio->lock, flags); ++ ++ return 0; ++} ++ ++static bool aspeed_ltpi_gpio_is_input(unsigned int offset) ++{ ++ return !(offset % 2); ++} ++ ++static int aspeed_ltpi_gpio_get(struct gpio_chip *gc, unsigned int offset) ++{ ++ 
struct aspeed_ltpi_gpio *gpio = gpiochip_get_data(gc); ++ void __iomem const *addr = ++ gpio->base + LTPI_GPIO_CTRL_REG_OFFSET(offset >> 1); ++ unsigned long flags; ++ u32 mask; ++ int rc = 0; ++ ++ raw_spin_lock_irqsave(&gpio->lock, flags); ++ ++ mask = aspeed_ltpi_gpio_is_input(offset) ? LTPI_GPIO_IN_DATA : ++ LTPI_GPIO_OUT_DATA; ++ rc = !!(field_get(mask, ioread32(addr))); ++ ++ raw_spin_unlock_irqrestore(&gpio->lock, flags); ++ ++ return rc; ++} ++ ++static int ltpi_gpio_set_value(struct gpio_chip *gc, unsigned int offset, ++ int val) ++{ ++ struct aspeed_ltpi_gpio *gpio = gpiochip_get_data(gc); ++ void __iomem *addr = ++ gpio->base + LTPI_GPIO_CTRL_REG_OFFSET(offset >> 1); ++ u32 reg = 0; ++ ++ if (aspeed_ltpi_gpio_is_input(offset)) ++ return -EINVAL; ++ ++ reg = ioread32(addr); ++ ++ if (val) ++ reg |= LTPI_GPIO_OUT_DATA; ++ else ++ reg &= ~LTPI_GPIO_OUT_DATA; ++ ++ iowrite32(reg, addr); ++ ++ return 0; ++} ++ ++static void aspeed_ltpi_gpio_set(struct gpio_chip *gc, unsigned int offset, ++ int val) ++{ ++ struct aspeed_ltpi_gpio *gpio = gpiochip_get_data(gc); ++ unsigned long flags; ++ ++ raw_spin_lock_irqsave(&gpio->lock, flags); ++ ltpi_gpio_set_value(gc, offset, val); ++ raw_spin_unlock_irqrestore(&gpio->lock, flags); ++} ++ ++static int aspeed_ltpi_gpio_dir_in(struct gpio_chip *gc, unsigned int offset) ++{ ++ return aspeed_ltpi_gpio_is_input(offset) ? 0 : -EINVAL; ++} ++ ++static int aspeed_ltpi_gpio_dir_out(struct gpio_chip *gc, unsigned int offset, ++ int val) ++{ ++ struct aspeed_ltpi_gpio *gpio = gpiochip_get_data(gc); ++ unsigned long flags; ++ int rc; ++ ++ raw_spin_lock_irqsave(&gpio->lock, flags); ++ rc = ltpi_gpio_set_value(gc, offset, val); ++ raw_spin_unlock_irqrestore(&gpio->lock, flags); ++ ++ return rc; ++} ++ ++static int aspeed_ltpi_gpio_get_direction(struct gpio_chip *gc, ++ unsigned int offset) ++{ ++ return !!aspeed_ltpi_gpio_is_input(offset); ++} ++ ++static void irqd_to_aspeed_ltpi_gpio_data(struct irq_data *d, ++ struct aspeed_ltpi_gpio **gpio, ++ int *offset) ++{ ++ struct aspeed_ltpi_gpio *internal; ++ ++ *offset = irqd_to_hwirq(d); ++ internal = irq_data_get_irq_chip_data(d); ++ WARN_ON(!internal); ++ ++ *gpio = internal; ++} ++ ++static void aspeed_ltpi_gpio_irq_ack(struct irq_data *d) ++{ ++ struct aspeed_ltpi_gpio *gpio; ++ unsigned long flags; ++ void __iomem *status_addr; ++ int offset; ++ ++ irqd_to_aspeed_ltpi_gpio_data(d, &gpio, &offset); ++ ++ status_addr = gpio->base + LTPI_GPIO_CTRL_REG_OFFSET(offset >> 1); ++ ++ raw_spin_lock_irqsave(&gpio->lock, flags); ++ ++ ast_write_bits(status_addr, LTPI_GPIO_IRQ_STS, 1); ++ ++ raw_spin_unlock_irqrestore(&gpio->lock, flags); ++} ++ ++static void aspeed_ltpi_gpio_irq_set_mask(struct irq_data *d, bool set) ++{ ++ struct aspeed_ltpi_gpio *gpio; ++ unsigned long flags; ++ void __iomem *addr; ++ int offset; ++ ++ irqd_to_aspeed_ltpi_gpio_data(d, &gpio, &offset); ++ addr = gpio->base + LTPI_GPIO_CTRL_REG_OFFSET(offset >> 1); ++ ++ /* Unmasking the IRQ */ ++ if (set) ++ gpiochip_enable_irq(&gpio->chip, irqd_to_hwirq(d)); ++ ++ raw_spin_lock_irqsave(&gpio->lock, flags); ++ if (set) ++ ast_write_bits(addr, LTPI_GPIO_IRQ_EN, 1); ++ else ++ ast_clr_bits(addr, LTPI_GPIO_IRQ_EN); ++ raw_spin_unlock_irqrestore(&gpio->lock, flags); ++ ++ /* Masking the IRQ */ ++ if (!set) ++ gpiochip_disable_irq(&gpio->chip, irqd_to_hwirq(d)); ++} ++ ++static void aspeed_ltpi_gpio_irq_mask(struct irq_data *d) ++{ ++ aspeed_ltpi_gpio_irq_set_mask(d, false); ++} ++ ++static void aspeed_ltpi_gpio_irq_unmask(struct irq_data *d) ++{ ++ 
aspeed_ltpi_gpio_irq_set_mask(d, true); ++} ++ ++static int aspeed_ltpi_gpio_set_type(struct irq_data *d, unsigned int type) ++{ ++ u32 type0 = 0; ++ u32 type1 = 0; ++ u32 type2 = 0; ++ irq_flow_handler_t handler; ++ struct aspeed_ltpi_gpio *gpio; ++ unsigned long flags; ++ void __iomem *addr; ++ int offset; ++ ++ irqd_to_aspeed_ltpi_gpio_data(d, &gpio, &offset); ++ addr = gpio->base + LTPI_GPIO_CTRL_REG_OFFSET(offset >> 1); ++ ++ switch (type & IRQ_TYPE_SENSE_MASK) { ++ case IRQ_TYPE_EDGE_BOTH: ++ type2 = 1; ++ fallthrough; ++ case IRQ_TYPE_EDGE_RISING: ++ type0 = 1; ++ fallthrough; ++ case IRQ_TYPE_EDGE_FALLING: ++ handler = handle_edge_irq; ++ break; ++ case IRQ_TYPE_LEVEL_HIGH: ++ type0 = 1; ++ fallthrough; ++ case IRQ_TYPE_LEVEL_LOW: ++ type1 = 1; ++ handler = handle_level_irq; ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ raw_spin_lock_irqsave(&gpio->lock, flags); ++ ++ ast_write_bits(addr, LTPI_GPIO_IRQ_TYPE2, type2); ++ ast_write_bits(addr, LTPI_GPIO_IRQ_TYPE1, type1); ++ ast_write_bits(addr, LTPI_GPIO_IRQ_TYPE0, type0); ++ ++ raw_spin_unlock_irqrestore(&gpio->lock, flags); ++ ++ irq_set_handler_locked(d, handler); ++ return 0; ++} ++ ++static void aspeed_ltpi_gpio_irq_handler(struct irq_desc *desc) ++{ ++ struct gpio_chip *gc = irq_desc_get_handler_data(desc); ++ struct irq_chip *ic = irq_desc_get_chip(desc); ++ struct aspeed_ltpi_gpio const *gpio = gpiochip_get_data(gc); ++ unsigned int i, p, banks; ++ unsigned long reg; ++ void __iomem const *addr; ++ ++ chained_irq_enter(ic, desc); ++ ++ banks = DIV_ROUND_UP(gpio->chip.ngpio >> 1, 32); ++ for (i = 0; i < banks; i++) { ++ addr = gpio->base + LTPI_GPIO_IRQ_STS_OFFSET(i); ++ ++ reg = ioread32(addr); ++ ++ for_each_set_bit(p, ®, 32) ++ generic_handle_domain_irq(gc->irq.domain, ++ (i * 32 + p) * 2); ++ } ++ chained_irq_exit(ic, desc); ++} ++ ++static void aspeed_ltpi_gpio_irq_print_chip(struct irq_data *d, ++ struct seq_file *p) ++{ ++ struct aspeed_ltpi_gpio *gpio; ++ int offset; ++ ++ irqd_to_aspeed_ltpi_gpio_data(d, &gpio, &offset); ++ seq_printf(p, dev_name(gpio->dev)); ++} ++ ++static const struct irq_chip aspeed_ltpi_gpio_irq_chip = { ++ .irq_ack = aspeed_ltpi_gpio_irq_ack, ++ .irq_mask = aspeed_ltpi_gpio_irq_mask, ++ .irq_unmask = aspeed_ltpi_gpio_irq_unmask, ++ .irq_set_type = aspeed_ltpi_gpio_set_type, ++ .irq_print_chip = aspeed_ltpi_gpio_irq_print_chip, ++ .flags = IRQCHIP_IMMUTABLE, ++ GPIOCHIP_IRQ_RESOURCE_HELPERS, ++}; ++ ++static int aspeed_ltpi_gpio_setup_irqs(struct aspeed_ltpi_gpio *gpio, ++ struct platform_device *pdev) ++{ ++ int rc; ++ struct gpio_irq_chip *irq; ++ ++ rc = platform_get_irq(pdev, 0); ++ if (rc < 0) ++ return rc; ++ ++ gpio->irq = rc; ++ ++ irq = &gpio->chip.irq; ++ gpio_irq_chip_set_chip(irq, &aspeed_ltpi_gpio_irq_chip); ++ irq->init_valid_mask = aspeed_ltpi_gpio_irq_init_valid_mask; ++ irq->handler = handle_bad_irq; ++ irq->init_hw = aspeed_ltpi_gpio_irq_init_hw; ++ irq->default_type = IRQ_TYPE_NONE; ++ irq->parent_handler = aspeed_ltpi_gpio_irq_handler; ++ irq->parent_handler_data = gpio; ++ irq->parents = &gpio->irq; ++ irq->num_parents = 1; ++ ++ return 0; ++} ++ ++static int aspeed_ltpi_gpio_reset_tolerance(struct gpio_chip *chip, ++ unsigned int offset, bool enable) ++{ ++ struct aspeed_ltpi_gpio *gpio = gpiochip_get_data(chip); ++ unsigned long flags; ++ void __iomem *reg; ++ ++ reg = gpio->base + LTPI_GPIO_CTRL_REG_OFFSET(offset >> 1); ++ ++ raw_spin_lock_irqsave(&gpio->lock, flags); ++ ++ if (enable) ++ ast_write_bits(reg, LTPI_GPIO_RST_TOLERANCE, 1); ++ else ++ ast_clr_bits(reg, 
LTPI_GPIO_RST_TOLERANCE); ++ ++ raw_spin_unlock_irqrestore(&gpio->lock, flags); ++ ++ return 0; ++} ++ ++static int aspeed_ltpi_gpio_set_config(struct gpio_chip *chip, ++ unsigned int offset, ++ unsigned long config) ++{ ++ unsigned long param = pinconf_to_config_param(config); ++ u32 arg = pinconf_to_config_argument(config); ++ ++ if (param == PIN_CONFIG_PERSIST_STATE) ++ return aspeed_ltpi_gpio_reset_tolerance(chip, offset, arg); ++ ++ return -EOPNOTSUPP; ++} ++ ++static const struct of_device_id aspeed_ltpi_gpio_of_table[] = { ++ { .compatible = "aspeed,ast2700-ltpi-gpio" }, ++ {} ++}; ++ ++MODULE_DEVICE_TABLE(of, aspeed_ltpi_gpio_of_table); ++ ++static int __init aspeed_ltpi_gpio_probe(struct platform_device *pdev) ++{ ++ u32 nr_gpios; ++ struct aspeed_ltpi_gpio *gpio; ++ int rc; ++ ++ gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL); ++ if (!gpio) ++ return -ENOMEM; ++ ++ gpio->base = devm_platform_ioremap_resource(pdev, 0); ++ if (IS_ERR(gpio->base)) ++ return PTR_ERR(gpio->base); ++ ++ gpio->dev = &pdev->dev; ++ ++ rc = device_property_read_u32(&pdev->dev, "ngpios", &nr_gpios); ++ if (rc < 0) { ++ dev_err(&pdev->dev, "Could not read ngpios property\n"); ++ return -EINVAL; ++ } ++ ++ raw_spin_lock_init(&gpio->lock); ++ ++ gpio->chip.parent = &pdev->dev; ++ gpio->chip.ngpio = nr_gpios * 2; ++ gpio->chip.init_valid_mask = aspeed_ltpi_gpio_init_valid_mask; ++ gpio->chip.direction_input = aspeed_ltpi_gpio_dir_in; ++ gpio->chip.direction_output = aspeed_ltpi_gpio_dir_out; ++ gpio->chip.get_direction = aspeed_ltpi_gpio_get_direction; ++ gpio->chip.request = NULL; ++ gpio->chip.free = NULL; ++ gpio->chip.get = aspeed_ltpi_gpio_get; ++ gpio->chip.set = aspeed_ltpi_gpio_set; ++ gpio->chip.set_config = aspeed_ltpi_gpio_set_config; ++ gpio->chip.label = dev_name(&pdev->dev); ++ gpio->chip.base = -1; ++ ++ aspeed_ltpi_gpio_setup_irqs(gpio, pdev); ++ ++ rc = devm_gpiochip_add_data(&pdev->dev, &gpio->chip, gpio); ++ if (rc < 0) ++ return rc; ++ ++ return 0; ++} ++ ++static struct platform_driver aspeed_ltpi_gpio_driver = { ++ .driver = { ++ .name = KBUILD_MODNAME, ++ .of_match_table = aspeed_ltpi_gpio_of_table, ++ }, ++}; ++ ++module_platform_driver_probe(aspeed_ltpi_gpio_driver, aspeed_ltpi_gpio_probe); ++MODULE_DESCRIPTION("Aspeed LTPI GPIO Driver"); +diff --git a/drivers/gpio/gpio-aspeed-sgpio.c b/drivers/gpio/gpio-aspeed-sgpio.c +--- a/drivers/gpio/gpio-aspeed-sgpio.c 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/gpio/gpio-aspeed-sgpio.c 2025-12-23 10:16:21.056033809 +0000 +@@ -18,7 +18,51 @@ + #include + #include + +-#define ASPEED_SGPIO_CTRL 0x54 ++#define SGPIO_G7_IRQ_STS_BASE 0x40 ++#define SGPIO_G7_IRQ_STS_OFFSET(x) (SGPIO_G7_IRQ_STS_BASE + (x) * 0x4) ++#define SGPIO_G7_CTRL_REG_BASE 0x80 ++#define SGPIO_G7_CTRL_REG_OFFSET(x) (SGPIO_G7_CTRL_REG_BASE + (x) * 0x4) ++#define SGPIO_G7_OUT_DATA BIT(0) ++#define SGPIO_G7_PARALLEL_OUT_DATA BIT(1) ++#define SGPIO_G7_IRQ_EN BIT(2) ++#define SGPIO_G7_IRQ_TYPE0 BIT(3) ++#define SGPIO_G7_IRQ_TYPE1 BIT(4) ++#define SGPIO_G7_IRQ_TYPE2 BIT(5) ++#define SGPIO_G7_RST_TOLERANCE BIT(6) ++#define SGPIO_G7_INPUT_MASK BIT(9) ++#define SGPIO_G7_HW_BYPASS_EN BIT(10) ++#define SGPIO_G7_HW_IN_SEL BIT(11) ++#define SGPIO_G7_IRQ_STS BIT(12) ++#define SGPIO_G7_IN_DATA BIT(13) ++#define SGPIO_G7_PARALLEL_IN_DATA BIT(14) ++#define SGPIO_G7_SERIAL_OUT_SEL GENMASK(17, 16) ++#define SGPIO_G7_PARALLEL_OUT_SEL GENMASK(19, 18) ++#define SELECT_FROM_CSR 0 ++#define SELECT_FROM_PARALLEL_IN 1 ++#define SELECT_FROM_SERIAL_IN 1 ++ ++#define BMC_CONTROL_START_INDEX 
128 ++#define BMC_CONTROL_END_INDEX 143 ++ ++static inline u32 field_get(u32 _mask, u32 _val) ++{ ++ return (((_val) & (_mask)) >> (ffs(_mask) - 1)); ++} ++ ++static inline u32 field_prep(u32 _mask, u32 _val) ++{ ++ return (((_val) << (ffs(_mask) - 1)) & (_mask)); ++} ++ ++static inline void ast_write_bits(void __iomem *addr, u32 mask, u32 val) ++{ ++ iowrite32((ioread32(addr) & ~(mask)) | field_prep(mask, val), addr); ++} ++ ++static inline void ast_clr_bits(void __iomem *addr, u32 mask) ++{ ++ iowrite32((ioread32(addr) & ~(mask)), addr); ++} + + #define ASPEED_SGPIO_CLK_DIV_MASK GENMASK(31, 16) + #define ASPEED_SGPIO_ENABLE BIT(0) +@@ -26,6 +70,9 @@ + + struct aspeed_sgpio_pdata { + const u32 pin_mask; ++ const u16 ctrl_reg; ++ const int version; ++ const bool slave; + }; + + struct aspeed_sgpio { +@@ -35,6 +82,8 @@ + raw_spinlock_t lock; + void __iomem *base; + int irq; ++ int version; ++ const struct aspeed_sgpio_pdata *pdata; + }; + + struct aspeed_sgpio_bank { +@@ -166,19 +215,39 @@ + return !(offset % 2); + } + ++static bool aspeed_sgpios_ctrl_by_csr(unsigned int offset) ++{ ++ if (offset >= BMC_CONTROL_START_INDEX && ++ offset <= BMC_CONTROL_END_INDEX) ++ return true; ++ return false; ++} ++ + static int aspeed_sgpio_get(struct gpio_chip *gc, unsigned int offset) + { + struct aspeed_sgpio *gpio = gpiochip_get_data(gc); +- const struct aspeed_sgpio_bank *bank = to_bank(offset); ++ const struct aspeed_sgpio_bank *bank; ++ void __iomem *addr = gpio->base + SGPIO_G7_CTRL_REG_OFFSET(offset >> 1); + unsigned long flags; + enum aspeed_sgpio_reg reg; + int rc = 0; + + raw_spin_lock_irqsave(&gpio->lock, flags); + +- reg = aspeed_sgpio_is_input(offset) ? reg_val : reg_rdata; +- rc = !!(ioread32(bank_reg(gpio, bank, reg)) & GPIO_BIT(offset)); +- ++ if (gpio->version == 7) { ++ if (gpio->pdata->slave && !aspeed_sgpios_ctrl_by_csr(offset)) ++ reg = aspeed_sgpio_is_input(offset) ? ++ SGPIO_G7_PARALLEL_IN_DATA : ++ SGPIO_G7_PARALLEL_OUT_DATA; ++ else ++ reg = aspeed_sgpio_is_input(offset) ? SGPIO_G7_IN_DATA : ++ SGPIO_G7_OUT_DATA; ++ rc = !!(field_get(reg, ioread32(addr))); ++ } else { ++ bank = to_bank(offset); ++ reg = aspeed_sgpio_is_input(offset) ? reg_val : reg_rdata; ++ rc = !!(ioread32(bank_reg(gpio, bank, reg)) & GPIO_BIT(offset)); ++ } + raw_spin_unlock_irqrestore(&gpio->lock, flags); + + return rc; +@@ -211,6 +280,38 @@ + return 0; + } + ++static int sgpio_g7_set_value(struct gpio_chip *gc, unsigned int offset, ++ int val) ++{ ++ struct aspeed_sgpio *gpio = gpiochip_get_data(gc); ++ void __iomem *addr = gpio->base + SGPIO_G7_CTRL_REG_OFFSET(offset >> 1); ++ u32 reg = 0, out_data; ++ ++ if (aspeed_sgpio_is_input(offset)) ++ return -EINVAL; ++ ++ if (gpio->pdata->slave && !aspeed_sgpios_ctrl_by_csr(offset)) { ++ // Ensure the parallel out value control by the software. ++ ast_write_bits(addr, SGPIO_G7_PARALLEL_OUT_SEL, ++ SELECT_FROM_CSR); ++ out_data = SGPIO_G7_PARALLEL_OUT_DATA; ++ } else { ++ // Ensure the serial out value control by the software. 
++ ast_write_bits(addr, SGPIO_G7_SERIAL_OUT_SEL, SELECT_FROM_CSR); ++ out_data = SGPIO_G7_OUT_DATA; ++ } ++ reg = ioread32(addr); ++ ++ if (val) ++ reg |= out_data; ++ else ++ reg &= ~out_data; ++ ++ iowrite32(reg, addr); ++ ++ return 0; ++} ++ + static void aspeed_sgpio_set(struct gpio_chip *gc, unsigned int offset, int val) + { + struct aspeed_sgpio *gpio = gpiochip_get_data(gc); +@@ -218,7 +319,10 @@ + + raw_spin_lock_irqsave(&gpio->lock, flags); + +- sgpio_set_value(gc, offset, val); ++ if (gpio->version == 7) ++ sgpio_g7_set_value(gc, offset, val); ++ else ++ sgpio_set_value(gc, offset, val); + + raw_spin_unlock_irqrestore(&gpio->lock, flags); + } +@@ -238,7 +342,10 @@ + * error-out in sgpio_set_value if this isn't an output GPIO */ + + raw_spin_lock_irqsave(&gpio->lock, flags); +- rc = sgpio_set_value(gc, offset, val); ++ if (gpio->version == 7) ++ rc = sgpio_g7_set_value(gc, offset, val); ++ else ++ rc = sgpio_set_value(gc, offset, val); + raw_spin_unlock_irqrestore(&gpio->lock, flags); + + return rc; +@@ -265,6 +372,19 @@ + *bit = GPIO_BIT(*offset); + } + ++static void irqd_to_aspeed_g7_sgpio_data(struct irq_data *d, ++ struct aspeed_sgpio **gpio, ++ int *offset) ++{ ++ struct aspeed_sgpio *internal; ++ ++ *offset = irqd_to_hwirq(d); ++ internal = irq_data_get_irq_chip_data(d); ++ WARN_ON(!internal); ++ ++ *gpio = internal; ++} ++ + static void aspeed_sgpio_irq_ack(struct irq_data *d) + { + const struct aspeed_sgpio_bank *bank; +@@ -285,6 +405,24 @@ + raw_spin_unlock_irqrestore(&gpio->lock, flags); + } + ++static void aspeed_g7_sgpio_irq_ack(struct irq_data *d) ++{ ++ struct aspeed_sgpio *gpio; ++ unsigned long flags; ++ void __iomem *status_addr; ++ int offset; ++ ++ irqd_to_aspeed_g7_sgpio_data(d, &gpio, &offset); ++ ++ status_addr = gpio->base + SGPIO_G7_CTRL_REG_OFFSET(offset >> 1); ++ ++ raw_spin_lock_irqsave(&gpio->lock, flags); ++ ++ ast_write_bits(status_addr, SGPIO_G7_IRQ_STS, 1); ++ ++ raw_spin_unlock_irqrestore(&gpio->lock, flags); ++} ++ + static void aspeed_sgpio_irq_set_mask(struct irq_data *d, bool set) + { + const struct aspeed_sgpio_bank *bank; +@@ -320,6 +458,32 @@ + + } + ++static void aspeed_g7_sgpio_irq_set_mask(struct irq_data *d, bool set) ++{ ++ struct aspeed_sgpio *gpio; ++ unsigned long flags; ++ void __iomem *addr; ++ int offset; ++ ++ irqd_to_aspeed_g7_sgpio_data(d, &gpio, &offset); ++ addr = gpio->base + SGPIO_G7_CTRL_REG_OFFSET(offset >> 1); ++ ++ /* Unmasking the IRQ */ ++ if (set) ++ gpiochip_enable_irq(&gpio->chip, irqd_to_hwirq(d)); ++ ++ raw_spin_lock_irqsave(&gpio->lock, flags); ++ if (set) ++ ast_write_bits(addr, SGPIO_G7_IRQ_EN, 1); ++ else ++ ast_clr_bits(addr, SGPIO_G7_IRQ_EN); ++ raw_spin_unlock_irqrestore(&gpio->lock, flags); ++ ++ /* Masking the IRQ */ ++ if (!set) ++ gpiochip_disable_irq(&gpio->chip, irqd_to_hwirq(d)); ++} ++ + static void aspeed_sgpio_irq_mask(struct irq_data *d) + { + aspeed_sgpio_irq_set_mask(d, false); +@@ -330,6 +494,16 @@ + aspeed_sgpio_irq_set_mask(d, true); + } + ++static void aspeed_g7_sgpio_irq_mask(struct irq_data *d) ++{ ++ aspeed_g7_sgpio_irq_set_mask(d, false); ++} ++ ++static void aspeed_g7_sgpio_irq_unmask(struct irq_data *d) ++{ ++ aspeed_g7_sgpio_irq_set_mask(d, true); ++} ++ + static int aspeed_sgpio_set_type(struct irq_data *d, unsigned int type) + { + u32 type0 = 0; +@@ -390,6 +564,53 @@ + return 0; + } + ++static int aspeed_g7_sgpio_set_type(struct irq_data *d, unsigned int type) ++{ ++ u32 type0 = 0; ++ u32 type1 = 0; ++ u32 type2 = 0; ++ irq_flow_handler_t handler; ++ struct aspeed_sgpio *gpio; ++ 
unsigned long flags; ++ void __iomem *addr; ++ int offset; ++ ++ irqd_to_aspeed_g7_sgpio_data(d, &gpio, &offset); ++ addr = gpio->base + SGPIO_G7_CTRL_REG_OFFSET(offset >> 1); ++ ++ switch (type & IRQ_TYPE_SENSE_MASK) { ++ case IRQ_TYPE_EDGE_BOTH: ++ type2 = 1; ++ fallthrough; ++ case IRQ_TYPE_EDGE_RISING: ++ type0 = 1; ++ fallthrough; ++ case IRQ_TYPE_EDGE_FALLING: ++ handler = handle_edge_irq; ++ break; ++ case IRQ_TYPE_LEVEL_HIGH: ++ type0 = 1; ++ fallthrough; ++ case IRQ_TYPE_LEVEL_LOW: ++ type1 = 1; ++ handler = handle_level_irq; ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ raw_spin_lock_irqsave(&gpio->lock, flags); ++ ++ ast_write_bits(addr, SGPIO_G7_IRQ_TYPE2, type2); ++ ast_write_bits(addr, SGPIO_G7_IRQ_TYPE1, type1); ++ ast_write_bits(addr, SGPIO_G7_IRQ_TYPE0, type0); ++ ++ raw_spin_unlock_irqrestore(&gpio->lock, flags); ++ ++ irq_set_handler_locked(d, handler); ++ return 0; ++} ++ + static void aspeed_sgpio_irq_handler(struct irq_desc *desc) + { + struct gpio_chip *gc = irq_desc_get_handler_data(desc); +@@ -412,6 +633,29 @@ + chained_irq_exit(ic, desc); + } + ++static void aspeed_g7_sgpio_irq_handler(struct irq_desc *desc) ++{ ++ struct gpio_chip *gc = irq_desc_get_handler_data(desc); ++ struct irq_chip *ic = irq_desc_get_chip(desc); ++ struct aspeed_sgpio *gpio = gpiochip_get_data(gc); ++ unsigned int i, p, banks; ++ unsigned long reg; ++ void __iomem *addr; ++ ++ chained_irq_enter(ic, desc); ++ ++ banks = DIV_ROUND_UP(gpio->chip.ngpio >> 1, 32); ++ for (i = 0; i < banks; i++) { ++ addr = gpio->base + SGPIO_G7_IRQ_STS_OFFSET(i); ++ ++ reg = ioread32(addr); ++ ++ for_each_set_bit(p, ®, 32) ++ generic_handle_domain_irq(gc->irq.domain, (i * 32 + p) * 2); ++ } ++ chained_irq_exit(ic, desc); ++} ++ + static void aspeed_sgpio_irq_print_chip(struct irq_data *d, struct seq_file *p) + { + const struct aspeed_sgpio_bank *bank; +@@ -423,6 +667,15 @@ + seq_printf(p, dev_name(gpio->dev)); + } + ++static void aspeed_g7_sgpio_irq_print_chip(struct irq_data *d, struct seq_file *p) ++{ ++ struct aspeed_sgpio *gpio; ++ int offset; ++ ++ irqd_to_aspeed_g7_sgpio_data(d, &gpio, &offset); ++ seq_printf(p, dev_name(gpio->dev)); ++} ++ + static const struct irq_chip aspeed_sgpio_irq_chip = { + .irq_ack = aspeed_sgpio_irq_ack, + .irq_mask = aspeed_sgpio_irq_mask, +@@ -433,6 +686,16 @@ + GPIOCHIP_IRQ_RESOURCE_HELPERS, + }; + ++static const struct irq_chip aspeed_g7_sgpio_irq_chip = { ++ .irq_ack = aspeed_g7_sgpio_irq_ack, ++ .irq_mask = aspeed_g7_sgpio_irq_mask, ++ .irq_unmask = aspeed_g7_sgpio_irq_unmask, ++ .irq_set_type = aspeed_g7_sgpio_set_type, ++ .irq_print_chip = aspeed_g7_sgpio_irq_print_chip, ++ .flags = IRQCHIP_IMMUTABLE, ++ GPIOCHIP_IRQ_RESOURCE_HELPERS, ++}; ++ + static int aspeed_sgpio_setup_irqs(struct aspeed_sgpio *gpio, + struct platform_device *pdev) + { +@@ -446,41 +709,49 @@ + + gpio->irq = rc; + +- /* Disable IRQ and clear Interrupt status registers for all SGPIO Pins. */ +- for (i = 0; i < ARRAY_SIZE(aspeed_sgpio_banks); i++) { +- bank = &aspeed_sgpio_banks[i]; +- /* disable irq enable bits */ +- iowrite32(0x00000000, bank_reg(gpio, bank, reg_irq_enable)); +- /* clear status bits */ +- iowrite32(0xffffffff, bank_reg(gpio, bank, reg_irq_status)); +- } ++ if (gpio->version != 7) ++ /* Disable IRQ and clear Interrupt status registers for all SGPIO Pins. 
*/ ++ for (i = 0; i < ARRAY_SIZE(aspeed_sgpio_banks); i++) { ++ bank = &aspeed_sgpio_banks[i]; ++ /* disable irq enable bits */ ++ iowrite32(0x00000000, bank_reg(gpio, bank, reg_irq_enable)); ++ /* clear status bits */ ++ iowrite32(0xffffffff, bank_reg(gpio, bank, reg_irq_status)); ++ } + + irq = &gpio->chip.irq; +- gpio_irq_chip_set_chip(irq, &aspeed_sgpio_irq_chip); ++ if (gpio->version == 7) ++ gpio_irq_chip_set_chip(irq, &aspeed_g7_sgpio_irq_chip); ++ else ++ gpio_irq_chip_set_chip(irq, &aspeed_sgpio_irq_chip); + irq->init_valid_mask = aspeed_sgpio_irq_init_valid_mask; + irq->handler = handle_bad_irq; + irq->default_type = IRQ_TYPE_NONE; +- irq->parent_handler = aspeed_sgpio_irq_handler; ++ irq->parent_handler = (gpio->version == 7) ? ++ aspeed_g7_sgpio_irq_handler : ++ aspeed_sgpio_irq_handler; + irq->parent_handler_data = gpio; + irq->parents = &gpio->irq; + irq->num_parents = 1; + +- /* Apply default IRQ settings */ +- for (i = 0; i < ARRAY_SIZE(aspeed_sgpio_banks); i++) { +- bank = &aspeed_sgpio_banks[i]; +- /* set falling or level-low irq */ +- iowrite32(0x00000000, bank_reg(gpio, bank, reg_irq_type0)); +- /* trigger type is edge */ +- iowrite32(0x00000000, bank_reg(gpio, bank, reg_irq_type1)); +- /* single edge trigger */ +- iowrite32(0x00000000, bank_reg(gpio, bank, reg_irq_type2)); +- } ++ if (gpio->version != 7) ++ /* Apply default IRQ settings */ ++ for (i = 0; i < ARRAY_SIZE(aspeed_sgpio_banks); i++) { ++ bank = &aspeed_sgpio_banks[i]; ++ /* set falling or level-low irq */ ++ iowrite32(0x00000000, bank_reg(gpio, bank, reg_irq_type0)); ++ /* trigger type is edge */ ++ iowrite32(0x00000000, bank_reg(gpio, bank, reg_irq_type1)); ++ /* single edge trigger */ ++ iowrite32(0x00000000, bank_reg(gpio, bank, reg_irq_type2)); ++ } + + return 0; + } + + static const struct aspeed_sgpio_pdata ast2400_sgpio_pdata = { + .pin_mask = GENMASK(9, 6), ++ .ctrl_reg = 0x54, + }; + + static int aspeed_sgpio_reset_tolerance(struct gpio_chip *chip, +@@ -509,38 +780,81 @@ + return 0; + } + ++static int aspeed_g7_sgpio_reset_tolerance(struct gpio_chip *chip, ++ unsigned int offset, bool enable) ++{ ++ struct aspeed_sgpio *gpio = gpiochip_get_data(chip); ++ unsigned long flags; ++ void __iomem *reg; ++ ++ reg = gpio->base + SGPIO_G7_CTRL_REG_OFFSET(offset >> 1); ++ ++ raw_spin_lock_irqsave(&gpio->lock, flags); ++ ++ if (enable) ++ ast_write_bits(reg, SGPIO_G7_RST_TOLERANCE, 1); ++ else ++ ast_clr_bits(reg, SGPIO_G7_RST_TOLERANCE); ++ ++ raw_spin_unlock_irqrestore(&gpio->lock, flags); ++ ++ return 0; ++} ++ + static int aspeed_sgpio_set_config(struct gpio_chip *chip, unsigned int offset, + unsigned long config) + { ++ struct aspeed_sgpio *gpio = gpiochip_get_data(chip); + unsigned long param = pinconf_to_config_param(config); + u32 arg = pinconf_to_config_argument(config); + +- if (param == PIN_CONFIG_PERSIST_STATE) +- return aspeed_sgpio_reset_tolerance(chip, offset, arg); ++ if (param == PIN_CONFIG_PERSIST_STATE) { ++ if (gpio->version == 7) ++ return aspeed_g7_sgpio_reset_tolerance(chip, offset, ++ arg); ++ else ++ return aspeed_sgpio_reset_tolerance(chip, offset, arg); ++ } + + return -ENOTSUPP; + } + + static const struct aspeed_sgpio_pdata ast2600_sgpiom_pdata = { + .pin_mask = GENMASK(10, 6), ++ .ctrl_reg = 0x54, ++}; ++ ++static const struct aspeed_sgpio_pdata ast2700_sgpiom_pdata = { ++ .pin_mask = GENMASK(11, 6), ++ .ctrl_reg = 0x0, ++ .version = 7, ++}; ++ ++static const struct aspeed_sgpio_pdata ast2700_sgpios_pdata = { ++ .pin_mask = GENMASK(11, 6), ++ .ctrl_reg = 0x0, ++ .version = 7, ++ 
.slave = 1, + }; + + static const struct of_device_id aspeed_sgpio_of_table[] = { + { .compatible = "aspeed,ast2400-sgpio", .data = &ast2400_sgpio_pdata, }, + { .compatible = "aspeed,ast2500-sgpio", .data = &ast2400_sgpio_pdata, }, + { .compatible = "aspeed,ast2600-sgpiom", .data = &ast2600_sgpiom_pdata, }, ++ { .compatible = "aspeed,ast2700-sgpiom", .data = &ast2700_sgpiom_pdata, }, ++ { .compatible = "aspeed,ast2700-sgpios", .data = &ast2700_sgpios_pdata, }, + {} + }; + + MODULE_DEVICE_TABLE(of, aspeed_sgpio_of_table); + +-static int __init aspeed_sgpio_probe(struct platform_device *pdev) ++static int aspeed_sgpio_probe(struct platform_device *pdev) + { + u32 nr_gpios, sgpio_freq, sgpio_clk_div, gpio_cnt_regval, pin_mask; +- const struct aspeed_sgpio_pdata *pdata; + struct aspeed_sgpio *gpio; + unsigned long apb_freq; +- int rc; ++ void __iomem *addr; ++ int rc, i; + + gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL); + if (!gpio) +@@ -552,11 +866,12 @@ + + gpio->dev = &pdev->dev; + +- pdata = device_get_match_data(&pdev->dev); +- if (!pdata) ++ gpio->pdata = device_get_match_data(&pdev->dev); ++ if (!gpio->pdata) + return -EINVAL; + +- pin_mask = pdata->pin_mask; ++ pin_mask = gpio->pdata->pin_mask; ++ gpio->version = gpio->pdata->version; + + rc = device_property_read_u32(&pdev->dev, "ngpios", &nr_gpios); + if (rc < 0) { +@@ -568,41 +883,53 @@ + return -EINVAL; + } + +- rc = device_property_read_u32(&pdev->dev, "bus-frequency", &sgpio_freq); +- if (rc < 0) { +- dev_err(&pdev->dev, "Could not read bus-frequency property\n"); +- return -EINVAL; ++ if (gpio->version == 7 && !gpio->pdata->slave) ++ for (i = 0; i < nr_gpios; i++) { ++ addr = gpio->base + SGPIO_G7_CTRL_REG_OFFSET(i); ++ ast_write_bits(addr, SGPIO_G7_SERIAL_OUT_SEL, ++ SELECT_FROM_CSR); ++ } ++ ++ if (!gpio->pdata->slave) { ++ rc = device_property_read_u32(&pdev->dev, "bus-frequency", &sgpio_freq); ++ if (rc < 0) { ++ dev_err(&pdev->dev, "Could not read bus-frequency property\n"); ++ return -EINVAL; ++ } ++ ++ gpio->pclk = devm_clk_get(&pdev->dev, NULL); ++ if (IS_ERR(gpio->pclk)) { ++ dev_err(&pdev->dev, "devm_clk_get failed\n"); ++ return PTR_ERR(gpio->pclk); ++ } ++ ++ apb_freq = clk_get_rate(gpio->pclk); ++ ++ /* ++ * From the datasheet, ++ * SGPIO period = 1/PCLK * 2 * (GPIO254[31:16] + 1) ++ * period = 2 * (GPIO254[31:16] + 1) / PCLK ++ * frequency = 1 / (2 * (GPIO254[31:16] + 1) / PCLK) ++ * frequency = PCLK / (2 * (GPIO254[31:16] + 1)) ++ * frequency * 2 * (GPIO254[31:16] + 1) = PCLK ++ * GPIO254[31:16] = PCLK / (frequency * 2) - 1 ++ */ ++ if (sgpio_freq == 0) ++ return -EINVAL; ++ ++ sgpio_clk_div = (apb_freq / (sgpio_freq * 2)) - 1; ++ ++ if (sgpio_clk_div > (1 << 16) - 1) ++ return -EINVAL; ++ ++ gpio_cnt_regval = ((nr_gpios / 8) << ASPEED_SGPIO_PINS_SHIFT) & pin_mask; ++ iowrite32(FIELD_PREP(ASPEED_SGPIO_CLK_DIV_MASK, sgpio_clk_div) | ++ gpio_cnt_regval | ASPEED_SGPIO_ENABLE, ++ gpio->base + gpio->pdata->ctrl_reg); ++ } else { ++ iowrite32(ASPEED_SGPIO_ENABLE, gpio->base + gpio->pdata->ctrl_reg); + } + +- gpio->pclk = devm_clk_get(&pdev->dev, NULL); +- if (IS_ERR(gpio->pclk)) { +- dev_err(&pdev->dev, "devm_clk_get failed\n"); +- return PTR_ERR(gpio->pclk); +- } +- +- apb_freq = clk_get_rate(gpio->pclk); +- +- /* +- * From the datasheet, +- * SGPIO period = 1/PCLK * 2 * (GPIO254[31:16] + 1) +- * period = 2 * (GPIO254[31:16] + 1) / PCLK +- * frequency = 1 / (2 * (GPIO254[31:16] + 1) / PCLK) +- * frequency = PCLK / (2 * (GPIO254[31:16] + 1)) +- * frequency * 2 * (GPIO254[31:16] + 1) = PCLK +- * 
GPIO254[31:16] = PCLK / (frequency * 2) - 1 +- */ +- if (sgpio_freq == 0) +- return -EINVAL; +- +- sgpio_clk_div = (apb_freq / (sgpio_freq * 2)) - 1; +- +- if (sgpio_clk_div > (1 << 16) - 1) +- return -EINVAL; +- +- gpio_cnt_regval = ((nr_gpios / 8) << ASPEED_SGPIO_PINS_SHIFT) & pin_mask; +- iowrite32(FIELD_PREP(ASPEED_SGPIO_CLK_DIV_MASK, sgpio_clk_div) | gpio_cnt_regval | +- ASPEED_SGPIO_ENABLE, gpio->base + ASPEED_SGPIO_CTRL); +- + raw_spin_lock_init(&gpio->lock); + + gpio->chip.parent = &pdev->dev; +@@ -629,11 +956,12 @@ + } + + static struct platform_driver aspeed_sgpio_driver = { ++ .probe = aspeed_sgpio_probe, + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = aspeed_sgpio_of_table, + }, + }; + +-module_platform_driver_probe(aspeed_sgpio_driver, aspeed_sgpio_probe); ++module_platform_driver(aspeed_sgpio_driver); + MODULE_DESCRIPTION("Aspeed Serial GPIO Driver"); +diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c +--- a/drivers/gpio/gpio-aspeed.c 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/gpio/gpio-aspeed.c 2025-12-23 10:16:21.049033926 +0000 +@@ -30,6 +30,27 @@ + #include + #include "gpiolib.h" + ++/* Non-constant mask variant of FIELD_GET() and FIELD_PREP() */ ++#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1)) ++#define field_prep(_mask, _val) (((_val) << (ffs(_mask) - 1)) & (_mask)) ++ ++#define GPIO_G7_IRQ_STS_BASE 0x100 ++#define GPIO_G7_IRQ_STS_OFFSET(x) (GPIO_G7_IRQ_STS_BASE + (x) * 0x4) ++#define GPIO_G7_CTRL_REG_BASE 0x180 ++#define GPIO_G7_CTRL_REG_OFFSET(x) (GPIO_G7_CTRL_REG_BASE + (x) * 0x4) ++#define GPIO_G7_CTRL_OUT_DATA BIT(0) ++#define GPIO_G7_CTRL_DIR BIT(1) ++#define GPIO_G7_CTRL_IRQ_EN BIT(2) ++#define GPIO_G7_CTRL_IRQ_TYPE0 BIT(3) ++#define GPIO_G7_CTRL_IRQ_TYPE1 BIT(4) ++#define GPIO_G7_CTRL_IRQ_TYPE2 BIT(5) ++#define GPIO_G7_CTRL_RST_TOLERANCE BIT(6) ++#define GPIO_G7_CTRL_DEBOUNCE_SEL1 BIT(7) ++#define GPIO_G7_CTRL_DEBOUNCE_SEL2 BIT(8) ++#define GPIO_G7_CTRL_INPUT_MASK BIT(9) ++#define GPIO_G7_CTRL_IRQ_STS BIT(12) ++#define GPIO_G7_CTRL_IN_DATA BIT(13) ++ + struct aspeed_bank_props { + unsigned int bank; + u32 input; +@@ -39,6 +60,10 @@ + struct aspeed_gpio_config { + unsigned int nr_gpios; + const struct aspeed_bank_props *props; ++ const struct aspeed_gpio_llops *llops; ++ const int *debounce_timers_array; ++ int debounce_timers_num; ++ bool require_dcache; + }; + + /* +@@ -77,7 +102,6 @@ + uint16_t debounce_regs; + uint16_t tolerance_regs; + uint16_t cmdsrc_regs; +- const char names[4][3]; + }; + + /* +@@ -92,6 +116,22 @@ + */ + + static const int debounce_timers[4] = { 0x00, 0x50, 0x54, 0x58 }; ++static const int g7_debounce_timers[4] = { 0x00, 0x00, 0x04, 0x08 }; ++ ++/* ++ * The debounce timers array is used to configure the debounce timer settings.Here’s how it works: ++ * Array Value: Indicates the offset for configuring the debounce timer. ++ * Array Index: Corresponds to the debounce setting register. ++ * The debounce timers array follows this pattern for configuring the debounce setting registers: ++ * Array Index 0: No debounce timer is set; ++ * Array Value is irrelevant (don’t care). ++ * Array Index 1: Debounce setting #2 is set to 1, and debounce setting #1 is set to 0. ++ * Array Value: offset for configuring debounce timer 0 (g4: 0x50, g7: 0x00) ++ * Array Index 2: Debounce setting #2 is set to 0, and debounce setting #1 is set to 1. 
++ * Array Value: offset for configuring debounce timer 1 (g4: 0x54, g7: 0x04) ++ * Array Index 3: Debounce setting #2 is set to 1, and debounce setting #1 is set to 1. ++ * Array Value: offset for configuring debounce timer 2 (g4: 0x58, g7: 0x8) ++ */ + + static const struct aspeed_gpio_copro_ops *copro_ops; + static void *copro_data; +@@ -104,7 +144,6 @@ + .debounce_regs = 0x0040, + .tolerance_regs = 0x001c, + .cmdsrc_regs = 0x0060, +- .names = { "A", "B", "C", "D" }, + }, + { + .val_regs = 0x0020, +@@ -113,7 +152,6 @@ + .debounce_regs = 0x0048, + .tolerance_regs = 0x003c, + .cmdsrc_regs = 0x0068, +- .names = { "E", "F", "G", "H" }, + }, + { + .val_regs = 0x0070, +@@ -122,7 +160,6 @@ + .debounce_regs = 0x00b0, + .tolerance_regs = 0x00ac, + .cmdsrc_regs = 0x0090, +- .names = { "I", "J", "K", "L" }, + }, + { + .val_regs = 0x0078, +@@ -131,7 +168,6 @@ + .debounce_regs = 0x0100, + .tolerance_regs = 0x00fc, + .cmdsrc_regs = 0x00e0, +- .names = { "M", "N", "O", "P" }, + }, + { + .val_regs = 0x0080, +@@ -140,7 +176,6 @@ + .debounce_regs = 0x0130, + .tolerance_regs = 0x012c, + .cmdsrc_regs = 0x0110, +- .names = { "Q", "R", "S", "T" }, + }, + { + .val_regs = 0x0088, +@@ -149,7 +184,6 @@ + .debounce_regs = 0x0160, + .tolerance_regs = 0x015c, + .cmdsrc_regs = 0x0140, +- .names = { "U", "V", "W", "X" }, + }, + { + .val_regs = 0x01E0, +@@ -158,7 +192,6 @@ + .debounce_regs = 0x0190, + .tolerance_regs = 0x018c, + .cmdsrc_regs = 0x0170, +- .names = { "Y", "Z", "AA", "AB" }, + }, + { + .val_regs = 0x01e8, +@@ -167,7 +200,6 @@ + .debounce_regs = 0x01c0, + .tolerance_regs = 0x01bc, + .cmdsrc_regs = 0x01a0, +- .names = { "AC", "", "", "" }, + }, + }; + +@@ -187,6 +219,19 @@ + reg_cmdsrc1, + }; + ++struct aspeed_gpio_llops { ++ void (*reg_bit_set)(struct aspeed_gpio *gpio, unsigned int offset, ++ const enum aspeed_gpio_reg reg, bool val); ++ bool (*reg_bit_get)(struct aspeed_gpio *gpio, unsigned int offset, ++ const enum aspeed_gpio_reg reg); ++ int (*reg_bank_get)(struct aspeed_gpio *gpio, unsigned int offset, ++ const enum aspeed_gpio_reg reg); ++ void (*privilege_ctrl)(struct aspeed_gpio *gpio, unsigned int offset, int owner); ++ void (*privilege_init)(struct aspeed_gpio *gpio); ++ bool (*copro_request)(struct aspeed_gpio *gpio, unsigned int offset); ++ void (*copro_release)(struct aspeed_gpio *gpio, unsigned int offset); ++}; ++ + #define GPIO_VAL_VALUE 0x00 + #define GPIO_VAL_DIR 0x04 + +@@ -207,9 +252,9 @@ + #define GPIO_CMDSRC_RESERVED 3 + + /* This will be resolved at compile time */ +-static inline void __iomem *bank_reg(struct aspeed_gpio *gpio, +- const struct aspeed_gpio_bank *bank, +- const enum aspeed_gpio_reg reg) ++static void __iomem *aspeed_gpio_g4_bank_reg(struct aspeed_gpio *gpio, ++ const struct aspeed_gpio_bank *bank, ++ const enum aspeed_gpio_reg reg) + { + switch (reg) { + case reg_val: +@@ -242,14 +287,43 @@ + BUG(); + } + ++static u32 aspeed_gpio_g7_reg_mask(const enum aspeed_gpio_reg reg) ++{ ++ switch (reg) { ++ case reg_val: ++ return GPIO_G7_CTRL_OUT_DATA; ++ case reg_dir: ++ return GPIO_G7_CTRL_DIR; ++ case reg_irq_enable: ++ return GPIO_G7_CTRL_IRQ_EN; ++ case reg_irq_type0: ++ return GPIO_G7_CTRL_IRQ_TYPE0; ++ case reg_irq_type1: ++ return GPIO_G7_CTRL_IRQ_TYPE1; ++ case reg_irq_type2: ++ return GPIO_G7_CTRL_IRQ_TYPE2; ++ case reg_tolerance: ++ return GPIO_G7_CTRL_RST_TOLERANCE; ++ case reg_debounce_sel1: ++ return GPIO_G7_CTRL_DEBOUNCE_SEL1; ++ case reg_debounce_sel2: ++ return GPIO_G7_CTRL_DEBOUNCE_SEL2; ++ case reg_rdata: ++ return GPIO_G7_CTRL_OUT_DATA; ++ case 
reg_irq_status: ++ return GPIO_G7_CTRL_IRQ_STS; ++ case reg_cmdsrc0: ++ case reg_cmdsrc1: ++ default: ++ WARN_ON_ONCE(1); ++ return 0; ++ } ++} ++ + #define GPIO_BANK(x) ((x) >> 5) + #define GPIO_OFFSET(x) ((x) & 0x1f) + #define GPIO_BIT(x) BIT(GPIO_OFFSET(x)) + +-#define _GPIO_SET_DEBOUNCE(t, o, i) ((!!((t) & BIT(i))) << GPIO_OFFSET(o)) +-#define GPIO_SET_DEBOUNCE1(t, o) _GPIO_SET_DEBOUNCE(t, o, 1) +-#define GPIO_SET_DEBOUNCE2(t, o) _GPIO_SET_DEBOUNCE(t, o, 0) +- + static const struct aspeed_gpio_bank *to_bank(unsigned int offset) + { + unsigned int bank = GPIO_BANK(offset); +@@ -280,11 +354,11 @@ + static inline bool have_gpio(struct aspeed_gpio *gpio, unsigned int offset) + { + const struct aspeed_bank_props *props = find_bank_props(gpio, offset); +- const struct aspeed_gpio_bank *bank = to_bank(offset); +- unsigned int group = GPIO_OFFSET(offset) / 8; + +- return bank->names[group][0] != '\0' && +- (!props || ((props->input | props->output) & GPIO_BIT(offset))); ++ if (offset >= gpio->chip.ngpio) ++ return false; ++ ++ return (!props || ((props->input | props->output) & GPIO_BIT(offset))); + } + + static inline bool have_input(struct aspeed_gpio *gpio, unsigned int offset) +@@ -304,110 +378,49 @@ + return !props || (props->output & GPIO_BIT(offset)); + } + +-static void aspeed_gpio_change_cmd_source(struct aspeed_gpio *gpio, +- const struct aspeed_gpio_bank *bank, +- int bindex, int cmdsrc) +-{ +- void __iomem *c0 = bank_reg(gpio, bank, reg_cmdsrc0); +- void __iomem *c1 = bank_reg(gpio, bank, reg_cmdsrc1); +- u32 bit, reg; +- +- /* +- * Each register controls 4 banks, so take the bottom 2 +- * bits of the bank index, and use them to select the +- * right control bit (0, 8, 16 or 24). +- */ +- bit = BIT((bindex & 3) << 3); +- +- /* Source 1 first to avoid illegal 11 combination */ +- reg = ioread32(c1); +- if (cmdsrc & 2) +- reg |= bit; +- else +- reg &= ~bit; +- iowrite32(reg, c1); +- +- /* Then Source 0 */ +- reg = ioread32(c0); +- if (cmdsrc & 1) +- reg |= bit; +- else +- reg &= ~bit; +- iowrite32(reg, c0); ++static void aspeed_gpio_change_cmd_source(struct aspeed_gpio *gpio, unsigned int offset, int cmdsrc) ++{ ++ if (gpio->config->llops->privilege_ctrl) ++ gpio->config->llops->privilege_ctrl(gpio, offset, cmdsrc); + } + + static bool aspeed_gpio_copro_request(struct aspeed_gpio *gpio, + unsigned int offset) + { +- const struct aspeed_gpio_bank *bank = to_bank(offset); ++ if (gpio->config->llops->copro_request) ++ return gpio->config->llops->copro_request(gpio, offset); + +- if (!copro_ops || !gpio->cf_copro_bankmap) +- return false; +- if (!gpio->cf_copro_bankmap[offset >> 3]) +- return false; +- if (!copro_ops->request_access) +- return false; +- +- /* Pause the coprocessor */ +- copro_ops->request_access(copro_data); +- +- /* Change command source back to ARM */ +- aspeed_gpio_change_cmd_source(gpio, bank, offset >> 3, GPIO_CMDSRC_ARM); +- +- /* Update cache */ +- gpio->dcache[GPIO_BANK(offset)] = ioread32(bank_reg(gpio, bank, reg_rdata)); +- +- return true; ++ return false; + } + + static void aspeed_gpio_copro_release(struct aspeed_gpio *gpio, + unsigned int offset) + { +- const struct aspeed_gpio_bank *bank = to_bank(offset); +- +- if (!copro_ops || !gpio->cf_copro_bankmap) +- return; +- if (!gpio->cf_copro_bankmap[offset >> 3]) +- return; +- if (!copro_ops->release_access) +- return; +- +- /* Change command source back to ColdFire */ +- aspeed_gpio_change_cmd_source(gpio, bank, offset >> 3, +- GPIO_CMDSRC_COLDFIRE); ++ if (gpio->config->llops->copro_release) ++ 
gpio->config->llops->copro_release(gpio, offset); ++} + +- /* Restart the coprocessor */ +- copro_ops->release_access(copro_data); ++static bool aspeed_gpio_support_copro(struct aspeed_gpio *gpio) ++{ ++ return gpio->config->llops->copro_request && gpio->config->llops->copro_release && ++ gpio->config->llops->privilege_ctrl && gpio->config->llops->privilege_init; + } + + static int aspeed_gpio_get(struct gpio_chip *gc, unsigned int offset) + { + struct aspeed_gpio *gpio = gpiochip_get_data(gc); +- const struct aspeed_gpio_bank *bank = to_bank(offset); + +- return !!(ioread32(bank_reg(gpio, bank, reg_val)) & GPIO_BIT(offset)); ++ return gpio->config->llops->reg_bit_get(gpio, offset, reg_val); + } + + static void __aspeed_gpio_set(struct gpio_chip *gc, unsigned int offset, + int val) + { + struct aspeed_gpio *gpio = gpiochip_get_data(gc); +- const struct aspeed_gpio_bank *bank = to_bank(offset); +- void __iomem *addr; +- u32 reg; +- +- addr = bank_reg(gpio, bank, reg_val); +- reg = gpio->dcache[GPIO_BANK(offset)]; + +- if (val) +- reg |= GPIO_BIT(offset); +- else +- reg &= ~GPIO_BIT(offset); +- gpio->dcache[GPIO_BANK(offset)] = reg; +- +- iowrite32(reg, addr); ++ gpio->config->llops->reg_bit_set(gpio, offset, reg_val, val); + /* Flush write */ +- ioread32(addr); ++ gpio->config->llops->reg_bit_get(gpio, offset, reg_val); + } + + static void aspeed_gpio_set(struct gpio_chip *gc, unsigned int offset, +@@ -415,7 +428,7 @@ + { + struct aspeed_gpio *gpio = gpiochip_get_data(gc); + unsigned long flags; +- bool copro; ++ bool copro = false; + + raw_spin_lock_irqsave(&gpio->lock, flags); + copro = aspeed_gpio_copro_request(gpio, offset); +@@ -430,22 +443,16 @@ + static int aspeed_gpio_dir_in(struct gpio_chip *gc, unsigned int offset) + { + struct aspeed_gpio *gpio = gpiochip_get_data(gc); +- const struct aspeed_gpio_bank *bank = to_bank(offset); +- void __iomem *addr = bank_reg(gpio, bank, reg_dir); + unsigned long flags; +- bool copro; +- u32 reg; ++ bool copro = false; + + if (!have_input(gpio, offset)) + return -ENOTSUPP; + + raw_spin_lock_irqsave(&gpio->lock, flags); + +- reg = ioread32(addr); +- reg &= ~GPIO_BIT(offset); +- + copro = aspeed_gpio_copro_request(gpio, offset); +- iowrite32(reg, addr); ++ gpio->config->llops->reg_bit_set(gpio, offset, reg_dir, 0); + if (copro) + aspeed_gpio_copro_release(gpio, offset); + +@@ -458,23 +465,17 @@ + unsigned int offset, int val) + { + struct aspeed_gpio *gpio = gpiochip_get_data(gc); +- const struct aspeed_gpio_bank *bank = to_bank(offset); +- void __iomem *addr = bank_reg(gpio, bank, reg_dir); + unsigned long flags; +- bool copro; +- u32 reg; ++ bool copro = false; + + if (!have_output(gpio, offset)) + return -ENOTSUPP; + + raw_spin_lock_irqsave(&gpio->lock, flags); + +- reg = ioread32(addr); +- reg |= GPIO_BIT(offset); +- + copro = aspeed_gpio_copro_request(gpio, offset); + __aspeed_gpio_set(gc, offset, val); +- iowrite32(reg, addr); ++ gpio->config->llops->reg_bit_set(gpio, offset, reg_dir, 1); + + if (copro) + aspeed_gpio_copro_release(gpio, offset); +@@ -486,7 +487,6 @@ + static int aspeed_gpio_get_direction(struct gpio_chip *gc, unsigned int offset) + { + struct aspeed_gpio *gpio = gpiochip_get_data(gc); +- const struct aspeed_gpio_bank *bank = to_bank(offset); + unsigned long flags; + u32 val; + +@@ -498,7 +498,7 @@ + + raw_spin_lock_irqsave(&gpio->lock, flags); + +- val = ioread32(bank_reg(gpio, bank, reg_dir)) & GPIO_BIT(offset); ++ val = gpio->config->llops->reg_bit_get(gpio, offset, reg_dir); + + raw_spin_unlock_irqrestore(&gpio->lock, 
flags); + +@@ -507,8 +507,7 @@ + + static inline int irqd_to_aspeed_gpio_data(struct irq_data *d, + struct aspeed_gpio **gpio, +- const struct aspeed_gpio_bank **bank, +- u32 *bit, int *offset) ++ int *offset) + { + struct aspeed_gpio *internal; + +@@ -521,32 +520,25 @@ + return -ENOTSUPP; + + *gpio = internal; +- *bank = to_bank(*offset); +- *bit = GPIO_BIT(*offset); + + return 0; + } + + static void aspeed_gpio_irq_ack(struct irq_data *d) + { +- const struct aspeed_gpio_bank *bank; + struct aspeed_gpio *gpio; + unsigned long flags; +- void __iomem *status_addr; + int rc, offset; +- bool copro; +- u32 bit; ++ bool copro = false; + +- rc = irqd_to_aspeed_gpio_data(d, &gpio, &bank, &bit, &offset); ++ rc = irqd_to_aspeed_gpio_data(d, &gpio, &offset); + if (rc) + return; + +- status_addr = bank_reg(gpio, bank, reg_irq_status); +- + raw_spin_lock_irqsave(&gpio->lock, flags); + copro = aspeed_gpio_copro_request(gpio, offset); + +- iowrite32(bit, status_addr); ++ gpio->config->llops->reg_bit_set(gpio, offset, reg_irq_status, 1); + + if (copro) + aspeed_gpio_copro_release(gpio, offset); +@@ -555,20 +547,15 @@ + + static void aspeed_gpio_irq_set_mask(struct irq_data *d, bool set) + { +- const struct aspeed_gpio_bank *bank; + struct aspeed_gpio *gpio; + unsigned long flags; +- u32 reg, bit; +- void __iomem *addr; + int rc, offset; +- bool copro; ++ bool copro = false; + +- rc = irqd_to_aspeed_gpio_data(d, &gpio, &bank, &bit, &offset); ++ rc = irqd_to_aspeed_gpio_data(d, &gpio, &offset); + if (rc) + return; + +- addr = bank_reg(gpio, bank, reg_irq_enable); +- + /* Unmasking the IRQ */ + if (set) + gpiochip_enable_irq(&gpio->chip, irqd_to_hwirq(d)); +@@ -576,12 +563,7 @@ + raw_spin_lock_irqsave(&gpio->lock, flags); + copro = aspeed_gpio_copro_request(gpio, offset); + +- reg = ioread32(addr); +- if (set) +- reg |= bit; +- else +- reg &= ~bit; +- iowrite32(reg, addr); ++ gpio->config->llops->reg_bit_set(gpio, offset, reg_irq_enable, set); + + if (copro) + aspeed_gpio_copro_release(gpio, offset); +@@ -607,34 +589,31 @@ + u32 type0 = 0; + u32 type1 = 0; + u32 type2 = 0; +- u32 bit, reg; +- const struct aspeed_gpio_bank *bank; + irq_flow_handler_t handler; + struct aspeed_gpio *gpio; + unsigned long flags; +- void __iomem *addr; + int rc, offset; +- bool copro; ++ bool copro = false; + +- rc = irqd_to_aspeed_gpio_data(d, &gpio, &bank, &bit, &offset); ++ rc = irqd_to_aspeed_gpio_data(d, &gpio, &offset); + if (rc) + return -EINVAL; + + switch (type & IRQ_TYPE_SENSE_MASK) { + case IRQ_TYPE_EDGE_BOTH: +- type2 |= bit; ++ type2 = 1; + fallthrough; + case IRQ_TYPE_EDGE_RISING: +- type0 |= bit; ++ type0 = 1; + fallthrough; + case IRQ_TYPE_EDGE_FALLING: + handler = handle_edge_irq; + break; + case IRQ_TYPE_LEVEL_HIGH: +- type0 |= bit; ++ type0 = 1; + fallthrough; + case IRQ_TYPE_LEVEL_LOW: +- type1 |= bit; ++ type1 = 1; + handler = handle_level_irq; + break; + default: +@@ -644,20 +623,9 @@ + raw_spin_lock_irqsave(&gpio->lock, flags); + copro = aspeed_gpio_copro_request(gpio, offset); + +- addr = bank_reg(gpio, bank, reg_irq_type0); +- reg = ioread32(addr); +- reg = (reg & ~bit) | type0; +- iowrite32(reg, addr); +- +- addr = bank_reg(gpio, bank, reg_irq_type1); +- reg = ioread32(addr); +- reg = (reg & ~bit) | type1; +- iowrite32(reg, addr); +- +- addr = bank_reg(gpio, bank, reg_irq_type2); +- reg = ioread32(addr); +- reg = (reg & ~bit) | type2; +- iowrite32(reg, addr); ++ gpio->config->llops->reg_bit_set(gpio, offset, reg_irq_type0, type0); ++ gpio->config->llops->reg_bit_set(gpio, offset, reg_irq_type1, type1); 
++ gpio->config->llops->reg_bit_set(gpio, offset, reg_irq_type2, type2); + + if (copro) + aspeed_gpio_copro_release(gpio, offset); +@@ -672,7 +640,6 @@ + { + struct gpio_chip *gc = irq_desc_get_handler_data(desc); + struct irq_chip *ic = irq_desc_get_chip(desc); +- struct aspeed_gpio *data = gpiochip_get_data(gc); + unsigned int i, p, banks; + unsigned long reg; + struct aspeed_gpio *gpio = gpiochip_get_data(gc); +@@ -681,9 +648,7 @@ + + banks = DIV_ROUND_UP(gpio->chip.ngpio, 32); + for (i = 0; i < banks; i++) { +- const struct aspeed_gpio_bank *bank = &aspeed_gpio_banks[i]; +- +- reg = ioread32(bank_reg(data, bank, reg_irq_status)); ++ reg = gpio->config->llops->reg_bank_get(gpio, i * 32, reg_irq_status); + + for_each_set_bit(p, ®, 32) + generic_handle_domain_irq(gc->irq.domain, i * 32 + p); +@@ -722,23 +687,12 @@ + { + struct aspeed_gpio *gpio = gpiochip_get_data(chip); + unsigned long flags; +- void __iomem *treg; +- bool copro; +- u32 val; +- +- treg = bank_reg(gpio, to_bank(offset), reg_tolerance); ++ bool copro = false; + + raw_spin_lock_irqsave(&gpio->lock, flags); + copro = aspeed_gpio_copro_request(gpio, offset); + +- val = readl(treg); +- +- if (enable) +- val |= GPIO_BIT(offset); +- else +- val &= ~GPIO_BIT(offset); +- +- writel(val, treg); ++ gpio->config->llops->reg_bit_set(gpio, offset, reg_tolerance, enable); + + if (copro) + aspeed_gpio_copro_release(gpio, offset); +@@ -832,21 +786,11 @@ + static void configure_timer(struct aspeed_gpio *gpio, unsigned int offset, + unsigned int timer) + { +- const struct aspeed_gpio_bank *bank = to_bank(offset); +- const u32 mask = GPIO_BIT(offset); +- void __iomem *addr; +- u32 val; +- + /* Note: Debounce timer isn't under control of the command + * source registers, so no need to sync with the coprocessor + */ +- addr = bank_reg(gpio, bank, reg_debounce_sel1); +- val = ioread32(addr); +- iowrite32((val & ~mask) | GPIO_SET_DEBOUNCE1(timer, offset), addr); +- +- addr = bank_reg(gpio, bank, reg_debounce_sel2); +- val = ioread32(addr); +- iowrite32((val & ~mask) | GPIO_SET_DEBOUNCE2(timer, offset), addr); ++ gpio->config->llops->reg_bit_set(gpio, offset, reg_debounce_sel1, !!(timer & BIT(1))); ++ gpio->config->llops->reg_bit_set(gpio, offset, reg_debounce_sel2, !!(timer & BIT(0))); + } + + static int enable_debounce(struct gpio_chip *chip, unsigned int offset, +@@ -877,15 +821,15 @@ + } + + /* Try to find a timer already configured for the debounce period */ +- for (i = 1; i < ARRAY_SIZE(debounce_timers); i++) { ++ for (i = 1; i < gpio->config->debounce_timers_num; i++) { + u32 cycles; + +- cycles = ioread32(gpio->base + debounce_timers[i]); ++ cycles = ioread32(gpio->base + gpio->config->debounce_timers_array[i]); + if (requested_cycles == cycles) + break; + } + +- if (i == ARRAY_SIZE(debounce_timers)) { ++ if (i == gpio->config->debounce_timers_num) { + int j; + + /* +@@ -899,8 +843,8 @@ + + if (j == ARRAY_SIZE(gpio->timer_users)) { + dev_warn(chip->parent, +- "Debounce timers exhausted, cannot debounce for period %luus\n", +- usecs); ++ "Debounce timers exhausted, cannot debounce for period %luus\n", ++ usecs); + + rc = -EPERM; + +@@ -916,7 +860,7 @@ + + i = j; + +- iowrite32(requested_cycles, gpio->base + debounce_timers[i]); ++ iowrite32(requested_cycles, gpio->base + gpio->config->debounce_timers_array[i]); + } + + if (WARN(i == 0, "Cannot register index of disabled timer\n")) { +@@ -1019,6 +963,9 @@ + const struct aspeed_gpio_bank *bank = to_bank(offset); + unsigned long flags; + ++ if (!aspeed_gpio_support_copro(gpio)) ++ return 
-EOPNOTSUPP; ++ + if (!gpio->cf_copro_bankmap) + gpio->cf_copro_bankmap = kzalloc(gpio->chip.ngpio >> 3, GFP_KERNEL); + if (!gpio->cf_copro_bankmap) +@@ -1038,7 +985,7 @@ + + /* Switch command source */ + if (gpio->cf_copro_bankmap[bindex] == 1) +- aspeed_gpio_change_cmd_source(gpio, bank, bindex, ++ aspeed_gpio_change_cmd_source(gpio, offset, + GPIO_CMDSRC_COLDFIRE); + + if (vreg_offset) +@@ -1062,9 +1009,11 @@ + struct gpio_chip *chip = gpiod_to_chip(desc); + struct aspeed_gpio *gpio = gpiochip_get_data(chip); + int rc = 0, bindex, offset = gpio_chip_hwgpio(desc); +- const struct aspeed_gpio_bank *bank = to_bank(offset); + unsigned long flags; + ++ if (!aspeed_gpio_support_copro(gpio)) ++ return -EOPNOTSUPP; ++ + if (!gpio->cf_copro_bankmap) + return -ENXIO; + +@@ -1083,7 +1032,7 @@ + + /* Switch command source */ + if (gpio->cf_copro_bankmap[bindex] == 0) +- aspeed_gpio_change_cmd_source(gpio, bank, bindex, ++ aspeed_gpio_change_cmd_source(gpio, offset, + GPIO_CMDSRC_ARM); + bail: + raw_spin_unlock_irqrestore(&gpio->lock, flags); +@@ -1093,12 +1042,10 @@ + + static void aspeed_gpio_irq_print_chip(struct irq_data *d, struct seq_file *p) + { +- const struct aspeed_gpio_bank *bank; + struct aspeed_gpio *gpio; +- u32 bit; + int rc, offset; + +- rc = irqd_to_aspeed_gpio_data(d, &gpio, &bank, &bit, &offset); ++ rc = irqd_to_aspeed_gpio_data(d, &gpio, &offset); + if (rc) + return; + +@@ -1115,6 +1062,173 @@ + GPIOCHIP_IRQ_RESOURCE_HELPERS, + }; + ++static void aspeed_g4_reg_bit_set(struct aspeed_gpio *gpio, unsigned int offset, ++ const enum aspeed_gpio_reg reg, bool val) ++{ ++ const struct aspeed_gpio_bank *bank = to_bank(offset); ++ void __iomem *addr = aspeed_gpio_g4_bank_reg(gpio, bank, reg); ++ u32 temp; ++ ++ if (reg == reg_val) ++ temp = gpio->dcache[GPIO_BANK(offset)]; ++ else ++ temp = ioread32(addr); ++ ++ if (val) ++ temp |= GPIO_BIT(offset); ++ else ++ temp &= ~GPIO_BIT(offset); ++ ++ if (reg == reg_val) ++ gpio->dcache[GPIO_BANK(offset)] = temp; ++ iowrite32(temp, addr); ++} ++ ++static bool aspeed_g4_reg_bit_get(struct aspeed_gpio *gpio, unsigned int offset, ++ const enum aspeed_gpio_reg reg) ++{ ++ const struct aspeed_gpio_bank *bank = to_bank(offset); ++ void __iomem *addr = aspeed_gpio_g4_bank_reg(gpio, bank, reg); ++ ++ return !!(ioread32(addr) & GPIO_BIT(offset)); ++} ++ ++static int aspeed_g4_reg_bank_get(struct aspeed_gpio *gpio, unsigned int offset, ++ const enum aspeed_gpio_reg reg) ++{ ++ const struct aspeed_gpio_bank *bank = to_bank(offset); ++ void __iomem *addr = aspeed_gpio_g4_bank_reg(gpio, bank, reg); ++ ++ if (reg == reg_rdata || reg == reg_irq_status) ++ return ioread32(addr); ++ else ++ return -EOPNOTSUPP; ++} ++ ++static void aspeed_g4_privilege_ctrl(struct aspeed_gpio *gpio, unsigned int offset, int cmdsrc) ++{ ++ /* ++ * The command source register is only valid in bits 0, 8, 16, and 24, so we use ++ * (offset & ~(0x7)) to ensure that reg_bits_set always targets a valid bit. 
++ */ ++ /* Source 1 first to avoid illegal 11 combination */ ++ aspeed_g4_reg_bit_set(gpio, offset & ~(0x7), reg_cmdsrc1, !!(cmdsrc & BIT(1))); ++ /* Then Source 0 */ ++ aspeed_g4_reg_bit_set(gpio, offset & ~(0x7), reg_cmdsrc0, !!(cmdsrc & BIT(0))); ++} ++ ++static void aspeed_g4_privilege_init(struct aspeed_gpio *gpio) ++{ ++ u32 i; ++ ++ /* Switch all command sources to the ARM by default */ ++ for (i = 0; i < DIV_ROUND_UP(gpio->chip.ngpio, 32); i++) { ++ aspeed_g4_privilege_ctrl(gpio, (i << 5) + 0, GPIO_CMDSRC_ARM); ++ aspeed_g4_privilege_ctrl(gpio, (i << 5) + 8, GPIO_CMDSRC_ARM); ++ aspeed_g4_privilege_ctrl(gpio, (i << 5) + 16, GPIO_CMDSRC_ARM); ++ aspeed_g4_privilege_ctrl(gpio, (i << 5) + 24, GPIO_CMDSRC_ARM); ++ } ++} ++ ++static bool aspeed_g4_copro_request(struct aspeed_gpio *gpio, unsigned int offset) ++{ ++ if (!copro_ops || !gpio->cf_copro_bankmap) ++ return false; ++ if (!gpio->cf_copro_bankmap[offset >> 3]) ++ return false; ++ if (!copro_ops->request_access) ++ return false; ++ ++ /* Pause the coprocessor */ ++ copro_ops->request_access(copro_data); ++ ++ /* Change command source back to ARM */ ++ aspeed_g4_privilege_ctrl(gpio, offset, GPIO_CMDSRC_ARM); ++ ++ /* Update cache */ ++ gpio->dcache[GPIO_BANK(offset)] = aspeed_g4_reg_bank_get(gpio, offset, reg_rdata); ++ ++ return true; ++} ++ ++static void aspeed_g4_copro_release(struct aspeed_gpio *gpio, unsigned int offset) ++{ ++ if (!copro_ops || !gpio->cf_copro_bankmap) ++ return; ++ if (!gpio->cf_copro_bankmap[offset >> 3]) ++ return; ++ if (!copro_ops->release_access) ++ return; ++ ++ /* Change command source back to ColdFire */ ++ aspeed_g4_privilege_ctrl(gpio, offset, GPIO_CMDSRC_COLDFIRE); ++ ++ /* Restart the coprocessor */ ++ copro_ops->release_access(copro_data); ++} ++ ++static const struct aspeed_gpio_llops aspeed_g4_llops = { ++ .reg_bit_set = aspeed_g4_reg_bit_set, ++ .reg_bit_get = aspeed_g4_reg_bit_get, ++ .reg_bank_get = aspeed_g4_reg_bank_get, ++ .privilege_ctrl = aspeed_g4_privilege_ctrl, ++ .privilege_init = aspeed_g4_privilege_init, ++ .copro_request = aspeed_g4_copro_request, ++ .copro_release = aspeed_g4_copro_release, ++}; ++ ++static void aspeed_g7_reg_bit_set(struct aspeed_gpio *gpio, unsigned int offset, ++ const enum aspeed_gpio_reg reg, bool val) ++{ ++ u32 mask = aspeed_gpio_g7_reg_mask(reg); ++ void __iomem *addr = gpio->base + GPIO_G7_CTRL_REG_OFFSET(offset); ++ u32 write_val; ++ ++ if (mask) { ++ write_val = (ioread32(addr) & ~(mask)) | field_prep(mask, val); ++ iowrite32(write_val, addr); ++ } ++} ++ ++static bool aspeed_g7_reg_bit_get(struct aspeed_gpio *gpio, unsigned int offset, ++ const enum aspeed_gpio_reg reg) ++{ ++ u32 mask = aspeed_gpio_g7_reg_mask(reg); ++ void __iomem *addr; ++ ++ addr = gpio->base + GPIO_G7_CTRL_REG_OFFSET(offset); ++ if (reg == reg_val) ++ mask = GPIO_G7_CTRL_IN_DATA; ++ ++ if (mask) ++ return field_get(mask, ioread32(addr)); ++ else ++ return 0; ++} ++ ++static int aspeed_g7_reg_bank_get(struct aspeed_gpio *gpio, unsigned int offset, ++ const enum aspeed_gpio_reg reg) ++{ ++ void __iomem *addr; ++ ++ if (reg == reg_irq_status) { ++ addr = gpio->base + GPIO_G7_IRQ_STS_OFFSET(offset >> 5); ++ return ioread32(addr); ++ } else { ++ return -EOPNOTSUPP; ++ } ++} ++ ++static const struct aspeed_gpio_llops aspeed_g7_llops = { ++ .reg_bit_set = aspeed_g7_reg_bit_set, ++ .reg_bit_get = aspeed_g7_reg_bit_get, ++ .reg_bank_get = aspeed_g7_reg_bank_get, ++ .privilege_ctrl = NULL, ++ .privilege_init = NULL, ++ .copro_request = NULL, ++ .copro_release = NULL, ++}; ++ + /* + * 
Any banks not specified in a struct aspeed_bank_props array are assumed to + * have the properties: +@@ -1131,7 +1245,14 @@ + + static const struct aspeed_gpio_config ast2400_config = + /* 220 for simplicity, really 216 with two 4-GPIO holes, four at end */ +- { .nr_gpios = 220, .props = ast2400_bank_props, }; ++ { ++ .nr_gpios = 220, ++ .props = ast2400_bank_props, ++ .llops = &aspeed_g4_llops, ++ .debounce_timers_array = debounce_timers, ++ .debounce_timers_num = ARRAY_SIZE(debounce_timers), ++ .require_dcache = true, ++ }; + + static const struct aspeed_bank_props ast2500_bank_props[] = { + /* input output */ +@@ -1143,7 +1264,14 @@ + + static const struct aspeed_gpio_config ast2500_config = + /* 232 for simplicity, actual number is 228 (4-GPIO hole in GPIOAB) */ +- { .nr_gpios = 232, .props = ast2500_bank_props, }; ++ { ++ .nr_gpios = 232, ++ .props = ast2500_bank_props, ++ .llops = &aspeed_g4_llops, ++ .debounce_timers_array = debounce_timers, ++ .debounce_timers_num = ARRAY_SIZE(debounce_timers), ++ .require_dcache = true, ++ }; + + static const struct aspeed_bank_props ast2600_bank_props[] = { + /* input output */ +@@ -1159,17 +1287,48 @@ + * We expect ngpio being set in the device tree and this is a fallback + * option. + */ +- { .nr_gpios = 208, .props = ast2600_bank_props, }; ++ { ++ .nr_gpios = 208, ++ .props = ast2600_bank_props, ++ .llops = &aspeed_g4_llops, ++ .debounce_timers_array = debounce_timers, ++ .debounce_timers_num = ARRAY_SIZE(debounce_timers), ++ .require_dcache = true, ++ }; ++ ++static const struct aspeed_bank_props ast2700_bank_props[] = { ++ /* input output */ ++ { 1, 0x0fffffff, 0x0fffffff }, /* E/F/G/H, 4-GPIO hole */ ++ { 6, 0x00ffffff, 0x00ff0000 }, /* Y/Z/AA */ ++ {}, ++}; ++ ++static const struct aspeed_gpio_config ast2700_config = ++ /* ++ * ast2700 has two controllers one with 212 GPIOs and one with 16 GPIOs. ++ * 216 for simplicity, actual number is 212 (4-GPIO hole in GPIOH) ++ * We expect ngpio being set in the device tree and this is a fallback ++ * option. 
++ */ ++ { ++ .nr_gpios = 216, ++ .props = ast2700_bank_props, ++ .llops = &aspeed_g7_llops, ++ .debounce_timers_array = g7_debounce_timers, ++ .debounce_timers_num = ARRAY_SIZE(g7_debounce_timers), ++ .require_dcache = false, ++ }; + + static const struct of_device_id aspeed_gpio_of_table[] = { + { .compatible = "aspeed,ast2400-gpio", .data = &ast2400_config, }, + { .compatible = "aspeed,ast2500-gpio", .data = &ast2500_config, }, + { .compatible = "aspeed,ast2600-gpio", .data = &ast2600_config, }, ++ { .compatible = "aspeed,ast2700-gpio", .data = &ast2700_config, }, + {} + }; + MODULE_DEVICE_TABLE(of, aspeed_gpio_of_table); + +-static int __init aspeed_gpio_probe(struct platform_device *pdev) ++static int aspeed_gpio_probe(struct platform_device *pdev) + { + const struct of_device_id *gpio_id; + struct gpio_irq_chip *girq; +@@ -1202,6 +1361,10 @@ + + gpio->config = gpio_id->data; + ++ if (!gpio->config->llops->reg_bit_set || !gpio->config->llops->reg_bit_get || ++ !gpio->config->llops->reg_bank_get) ++ return -EINVAL; ++ + gpio->chip.parent = &pdev->dev; + err = of_property_read_u32(pdev->dev.of_node, "ngpios", &ngpio); + gpio->chip.ngpio = (u16) ngpio; +@@ -1218,27 +1381,23 @@ + gpio->chip.label = dev_name(&pdev->dev); + gpio->chip.base = -1; + +- /* Allocate a cache of the output registers */ +- banks = DIV_ROUND_UP(gpio->chip.ngpio, 32); +- gpio->dcache = devm_kcalloc(&pdev->dev, +- banks, sizeof(u32), GFP_KERNEL); +- if (!gpio->dcache) +- return -ENOMEM; +- +- /* +- * Populate it with initial values read from the HW and switch +- * all command sources to the ARM by default +- */ +- for (i = 0; i < banks; i++) { +- const struct aspeed_gpio_bank *bank = &aspeed_gpio_banks[i]; +- void __iomem *addr = bank_reg(gpio, bank, reg_rdata); +- gpio->dcache[i] = ioread32(addr); +- aspeed_gpio_change_cmd_source(gpio, bank, 0, GPIO_CMDSRC_ARM); +- aspeed_gpio_change_cmd_source(gpio, bank, 1, GPIO_CMDSRC_ARM); +- aspeed_gpio_change_cmd_source(gpio, bank, 2, GPIO_CMDSRC_ARM); +- aspeed_gpio_change_cmd_source(gpio, bank, 3, GPIO_CMDSRC_ARM); ++ if (gpio->config->require_dcache) { ++ /* Allocate a cache of the output registers */ ++ banks = DIV_ROUND_UP(gpio->chip.ngpio, 32); ++ gpio->dcache = devm_kcalloc(&pdev->dev, banks, sizeof(u32), GFP_KERNEL); ++ if (!gpio->dcache) ++ return -ENOMEM; ++ /* ++ * Populate it with initial values read from the HW ++ */ ++ for (i = 0; i < banks; i++) ++ gpio->dcache[i] = ++ gpio->config->llops->reg_bank_get(gpio, (i << 5), reg_rdata); + } + ++ if (gpio->config->llops->privilege_init) ++ gpio->config->llops->privilege_init(gpio); ++ + /* Set up an irqchip */ + irq = platform_get_irq(pdev, 0); + if (irq < 0) +@@ -1270,13 +1429,14 @@ + } + + static struct platform_driver aspeed_gpio_driver = { ++ .probe = aspeed_gpio_probe, + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = aspeed_gpio_of_table, + }, + }; + +-module_platform_driver_probe(aspeed_gpio_driver, aspeed_gpio_probe); ++module_platform_driver(aspeed_gpio_driver); + + MODULE_DESCRIPTION("Aspeed GPIO Driver"); + MODULE_LICENSE("GPL"); +diff --git a/drivers/gpu/drm/aspeed/Kconfig b/drivers/gpu/drm/aspeed/Kconfig +--- a/drivers/gpu/drm/aspeed/Kconfig 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/gpu/drm/aspeed/Kconfig 2025-12-23 10:16:09.077234622 +0000 +@@ -4,6 +4,7 @@ + depends on DRM && OF + depends on (COMPILE_TEST || ARCH_ASPEED) + depends on MMU ++ select FB + select DRM_KMS_HELPER + select DRM_GEM_DMA_HELPER + select DMA_CMA if HAVE_DMA_CONTIGUOUS +diff --git 
a/drivers/gpu/drm/aspeed/aspeed_gfx.h b/drivers/gpu/drm/aspeed/aspeed_gfx.h +--- a/drivers/gpu/drm/aspeed/aspeed_gfx.h 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/gpu/drm/aspeed/aspeed_gfx.h 2025-12-23 10:16:20.986034982 +0000 +@@ -8,14 +8,31 @@ + struct drm_device drm; + void __iomem *base; + struct clk *clk; +- struct reset_control *rst; ++ struct reset_control *rst_crt; ++ struct reset_control *rst_engine; + struct regmap *scu; ++ struct regmap *dp; ++ struct regmap *dpmcu; ++ struct regmap *pcie_ep; ++ ++ u8 dp_support; ++ u8 pcie_advance; ++ u8 pcie_active; + + u32 dac_reg; + u32 int_clr_reg; + u32 vga_scratch_reg; + u32 throd_val; + u32 scan_line_max; ++ u32 flags; ++ u32 pcie_int_reg; ++ u32 pcie_int_mask; ++ u32 pcie_int_l_to_h; ++ u32 pcie_int_h_to_l; ++ u32 pcie_link_reg; ++ u32 pcie_link_bit; ++ u32 soc_crt_bit; ++ u32 soc_dp_bit; + + struct drm_simple_display_pipe pipe; + struct drm_connector connector; +@@ -106,3 +123,58 @@ + /* CRT_THROD */ + #define CRT_THROD_LOW(x) (x) + #define CRT_THROD_HIGH(x) ((x) << 8) ++ ++/* SCU control */ ++#define G4_DISABLE_D2_PLL BIT(4) ++#define G4_40_CLK 0x46314 ++#define G6_CLK_SOURCE 0x300 ++#define G6_CLK_SOURCE_MASK (BIT(8) | BIT(9) | BIT(10)) ++#define G6_CLK_SOURCE_HPLL (BIT(8) | BIT(9) | BIT(10)) ++#define G6_CLK_SOURCE_USB BIT(9) ++#define G6_CLK_SEL3 0x308 ++#define G6_CLK_DIV_MASK 0x3F000 ++#define G6_CLK_DIV_16 (BIT(16) | BIT(15) | BIT(13) | BIT(12)) ++#define G6_USB_40_CLK BIT(9) ++ ++/* GFX FLAGS */ ++#define RESET_MASK BIT(0) ++#define RESET_G6 BIT(0) ++#define CLK_MASK (BIT(4) | BIT(5) | BIT(6)) ++#define CLK_G4 BIT(4) ++#define CLK_G6 BIT(5) ++#define CLK_G7 BIT(6) ++#define ADDR_64 BIT(12) ++ ++/* PCIE interrupt */ ++#define PCIE_PERST_L_T_H_G5 BIT(18) ++#define PCIE_PERST_H_T_L_G5 BIT(19) ++#define PCIE_PERST_L_T_H_G7 BIT(2) ++#define PCIE_PERST_H_T_L_G7 BIT(3) ++ ++/* PCIE endpoint define */ ++#define PCIE_LINK_REG_G5 0xC0 ++#define PCIE_LINK_STATUS_G5 BIT(5) ++#define PCIE_LINK_REG_G7 0x358 ++#define PCIE_LINK_STATUS_G7 BIT(8) ++ ++/* Adaptor function define */ ++/* AST2600: DP adaptor define */ ++#define DP_26_CP_NAME "aspeed,ast2600-displayport" ++#define DP_26_MCU_CP_NAME "aspeed,ast2600-displayport-mcu" ++/* AST2600 */ ++#define SCU_DP_STATUS 0x100 /* SCU100 VGA function handshake */ ++/* AST2700: DP adaptor define */ ++#define DP_27_CP_NAME "aspeed,ast2700-displayport" ++#define DP_27_MCU_CP_NAME "aspeed,ast2700-displayport-mcu" ++#define SCU_PCIE0_DP_STATUS 0x900 /* SCU900 PCIE0 handshake */ ++#define SCU_PCIE1_DP_STATUS 0x910 /* SCU910 PCIE1 handshake */ ++#define DP_LOCATE_PCIE1 BIT(8) /* DP located on PCIE1 */ ++ ++/* AST DP */ ++#define DP_EXECUTE 0x2E /* DP Status */ ++#define DP_SOURCE 0xb8 /* DPB8 dp source */ ++#define DP_CONTROL_FROM_SOC (BIT(24) | BIT(28)) ++/* AST DP MCU */ ++#define DP_RESOLUTION 0xde0 /* DPMCUDE0 dp resolution */ ++#define DP_800 0x01050020 /* 800 x 600 60Hz */ ++#define DP_1024 0x010a0020 /* 1024 x 768 70Hz */ +diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c b/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c +--- a/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c 2025-12-23 10:16:20.979035100 +0000 +@@ -23,6 +23,40 @@ + return container_of(pipe, struct aspeed_gfx, pipe); + } + ++static void aspeed_gfx_set_g4_clock(struct aspeed_gfx *priv) ++{ ++ /* turn on d2 pll for soc display at ast2400 */ ++ regmap_update_bits(priv->scu, priv->dac_reg, G4_DISABLE_D2_PLL, 0); ++ /* apply 800 x 600 @ 60 for soc display at
ast2400 */ ++ writel(G4_40_CLK, priv->base + CRT_MISC); ++} ++ ++static void aspeed_gfx_set_g6_clock_source(struct aspeed_gfx *priv, int mode_width) ++{ ++ regmap_update_bits(priv->scu, G6_CLK_SOURCE, G6_CLK_SOURCE_MASK, 0x0); ++ regmap_update_bits(priv->scu, G6_CLK_SEL3, G6_CLK_DIV_MASK, 0x0); ++ ++ switch (mode_width) { ++ case 1024: ++ /* hpll div 16 = 75Mhz */ ++ regmap_update_bits(priv->scu, G6_CLK_SOURCE, G6_CLK_SOURCE_MASK, G6_CLK_SOURCE_HPLL); ++ regmap_update_bits(priv->scu, G6_CLK_SEL3, G6_CLK_DIV_MASK, G6_CLK_DIV_16); ++ break; ++ case 800: ++ default: ++ /* usb 40Mhz */ ++ regmap_update_bits(priv->scu, G6_CLK_SOURCE, G6_CLK_SOURCE_MASK, G6_CLK_SOURCE_USB); ++ break; ++ } ++} ++ ++static void aspeed_gfx_set_g7_clock(struct aspeed_gfx *priv) ++{ ++ /* apply 800 x 600 @ 62 on ast2700 */ ++ regmap_update_bits(priv->scu, 0x288, BIT(14), BIT(14)); ++ regmap_write(priv->scu, 0x340, 0x00130002); ++} ++ + static int aspeed_gfx_set_pixel_fmt(struct aspeed_gfx *priv, u32 *bpp) + { + struct drm_crtc *crtc = &priv->pipe.crtc; +@@ -59,8 +93,12 @@ + u32 ctrl1 = readl(priv->base + CRT_CTRL1); + u32 ctrl2 = readl(priv->base + CRT_CTRL2); + +- /* Set DAC source for display output to Graphics CRT (GFX) */ +- regmap_update_bits(priv->scu, priv->dac_reg, BIT(16), BIT(16)); ++ /* change the display source is coming from soc display */ ++ if (!priv->pcie_advance || !priv->pcie_active) { ++ regmap_update_bits(priv->scu, priv->dac_reg, priv->soc_crt_bit, priv->soc_crt_bit); ++ if (priv->dp_support) ++ regmap_update_bits(priv->scu, priv->dac_reg, priv->soc_dp_bit, priv->soc_dp_bit); ++ } + + writel(ctrl1 | CRT_CTRL_EN, priv->base + CRT_CTRL1); + writel(ctrl2 | CRT_CTRL_DAC_EN, priv->base + CRT_CTRL2); +@@ -74,7 +112,42 @@ + writel(ctrl1 & ~CRT_CTRL_EN, priv->base + CRT_CTRL1); + writel(ctrl2 & ~CRT_CTRL_DAC_EN, priv->base + CRT_CTRL2); + +- regmap_update_bits(priv->scu, priv->dac_reg, BIT(16), 0); ++ /* Set display source for display output to pcie host display */ ++ regmap_update_bits(priv->scu, priv->dac_reg, priv->soc_crt_bit, 0); ++ if (priv->dp_support) ++ regmap_update_bits(priv->scu, priv->dac_reg, priv->soc_dp_bit, 0); ++} ++ ++static void aspeed_gfx_set_clk(struct aspeed_gfx *priv, int mode_width) ++{ ++ switch (priv->flags & CLK_MASK) { ++ case CLK_G4: ++ aspeed_gfx_set_g4_clock(priv); ++ break; ++ case CLK_G6: ++ aspeed_gfx_set_g6_clock_source(priv, mode_width); ++ break; ++ case CLK_G7: ++ aspeed_gfx_set_g7_clock(priv); ++ break; ++ default: ++ break; ++ } ++} ++ ++static void aspeed_gfx_dp_mode_set(struct aspeed_gfx *priv, int mode_width) ++{ ++ switch (mode_width) { ++ case 1024: ++ /* hpll div 16 = 75Mhz */ ++ regmap_write(priv->dpmcu, DP_RESOLUTION, DP_1024); ++ break; ++ case 800: ++ default: ++ /* usb 40Mhz */ ++ regmap_write(priv->dpmcu, DP_RESOLUTION, DP_800); ++ break; ++ } + } + + static void aspeed_gfx_crtc_mode_set_nofb(struct aspeed_gfx *priv) +@@ -87,6 +160,8 @@ + if (err) + return; + ++ aspeed_gfx_set_clk(priv, m->hdisplay); ++ + #if 0 + /* TODO: we have only been able to test with the 40MHz USB clock. The + * clock is fixed, so we cannot adjust it here. 
*/ +@@ -137,6 +212,10 @@ + * per line, rounded up) + */ + writel(priv->throd_val, priv->base + CRT_THROD); ++ ++ /* set the dp mode index */ ++ if (priv->dp_support) ++ aspeed_gfx_dp_mode_set(priv, m->hdisplay); + } + + static void aspeed_gfx_pipe_enable(struct drm_simple_display_pipe *pipe, +@@ -187,12 +266,17 @@ + gem = drm_fb_dma_get_gem_obj(fb, 0); + if (!gem) + return; +- writel(gem->dma_addr, priv->base + CRT_ADDR); ++ ++ if (priv->flags & ADDR_64) ++ writel((gem->dma_addr >> 2), priv->base + CRT_ADDR); ++ else ++ writel(gem->dma_addr, priv->base + CRT_ADDR); + } + + static int aspeed_gfx_enable_vblank(struct drm_simple_display_pipe *pipe) + { + struct aspeed_gfx *priv = drm_pipe_to_aspeed_gfx(pipe); ++ + u32 reg = readl(priv->base + CRT_CTRL1); + + /* Clear pending VBLANK IRQ */ +@@ -207,6 +291,7 @@ + static void aspeed_gfx_disable_vblank(struct drm_simple_display_pipe *pipe) + { + struct aspeed_gfx *priv = drm_pipe_to_aspeed_gfx(pipe); ++ + u32 reg = readl(priv->base + CRT_CTRL1); + + reg &= ~CRT_CTRL_VERTICAL_INTR_EN; +diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c +--- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c 2025-12-23 10:16:20.992034882 +0000 +@@ -63,6 +63,15 @@ + u32 vga_scratch_reg; /* VGA scratch register in SCU */ + u32 throd_val; /* Default Threshold Seting */ + u32 scan_line_max; /* Max memory size of one scan line */ ++ u32 gfx_flags; /* Flags for gfx chip caps */ ++ u32 pcie_int_reg; /* pcie interrupt */ ++ u32 pcie_int_mask; /* pcie PERST# mask */ ++ u32 pcie_int_l_to_h; /* pcie PERST# low to high */ ++ u32 pcie_int_h_to_l; /* pcie PERST# high to low */ ++ u32 pcie_link_reg; /* pcie link status offset */ ++ u32 pcie_link_bit; /* pcie link status bit */ ++ u32 soc_crt_bit; /* soc display crt switch flag*/ ++ u32 soc_dp_bit; /* soc display dp switch flag*/ + }; + + static const struct aspeed_gfx_config ast2400_config = { +@@ -71,6 +80,15 @@ + .vga_scratch_reg = 0x50, + .throd_val = CRT_THROD_LOW(0x1e) | CRT_THROD_HIGH(0x12), + .scan_line_max = 64, ++ .gfx_flags = CLK_G4, ++ .pcie_int_reg = 0x0, ++ .pcie_int_mask = 0x0, ++ .pcie_int_l_to_h = 0x0, ++ .pcie_int_h_to_l = 0x0, ++ .pcie_link_reg = 0x0, ++ .pcie_link_bit = 0x0, ++ .soc_crt_bit = BIT(16), ++ .soc_dp_bit = 0x0, + }; + + static const struct aspeed_gfx_config ast2500_config = { +@@ -79,6 +97,15 @@ + .vga_scratch_reg = 0x50, + .throd_val = CRT_THROD_LOW(0x24) | CRT_THROD_HIGH(0x3c), + .scan_line_max = 128, ++ .gfx_flags = 0, ++ .pcie_int_reg = 0x18, ++ .pcie_int_mask = (PCIE_PERST_L_T_H_G5 | PCIE_PERST_H_T_L_G5), ++ .pcie_int_l_to_h = PCIE_PERST_L_T_H_G5, ++ .pcie_int_h_to_l = PCIE_PERST_H_T_L_G5, ++ .pcie_link_reg = PCIE_LINK_REG_G5, ++ .pcie_link_bit = PCIE_LINK_STATUS_G5, ++ .soc_crt_bit = BIT(16), ++ .soc_dp_bit = 0x0, + }; + + static const struct aspeed_gfx_config ast2600_config = { +@@ -87,12 +114,39 @@ + .vga_scratch_reg = 0x50, + .throd_val = CRT_THROD_LOW(0x50) | CRT_THROD_HIGH(0x70), + .scan_line_max = 128, ++ .gfx_flags = RESET_G6 | CLK_G6, ++ .pcie_int_reg = 0x560, ++ .pcie_int_mask = (PCIE_PERST_L_T_H_G5 | PCIE_PERST_H_T_L_G5), ++ .pcie_int_l_to_h = PCIE_PERST_L_T_H_G5, ++ .pcie_int_h_to_l = PCIE_PERST_H_T_L_G5, ++ .pcie_link_reg = PCIE_LINK_REG_G5, ++ .pcie_link_bit = PCIE_LINK_STATUS_G5, ++ .soc_crt_bit = BIT(16), ++ .soc_dp_bit = BIT(18), ++}; ++ ++static const struct aspeed_gfx_config ast2700_config = { ++ .dac_reg = 0x414, ++ .int_clear_reg = 0x68, ++ .vga_scratch_reg 
= 0x50, ++ .throd_val = CRT_THROD_LOW(0x50) | CRT_THROD_HIGH(0x70), ++ .scan_line_max = 128, ++ .gfx_flags = CLK_G7 | ADDR_64, ++ .pcie_int_reg = 0x1D0, ++ .pcie_int_mask = (PCIE_PERST_L_T_H_G7 | PCIE_PERST_H_T_L_G7), ++ .pcie_int_l_to_h = PCIE_PERST_L_T_H_G7, ++ .pcie_int_h_to_l = PCIE_PERST_H_T_L_G7, ++ .pcie_link_reg = PCIE_LINK_REG_G7, ++ .pcie_link_bit = PCIE_LINK_STATUS_G7, ++ .soc_crt_bit = BIT(11), ++ .soc_dp_bit = BIT(9), + }; + + static const struct of_device_id aspeed_gfx_match[] = { + { .compatible = "aspeed,ast2400-gfx", .data = &ast2400_config }, + { .compatible = "aspeed,ast2500-gfx", .data = &ast2500_config }, + { .compatible = "aspeed,ast2600-gfx", .data = &ast2600_config }, ++ { .compatible = "aspeed,ast2700-gfx", .data = &ast2700_config }, + { }, + }; + MODULE_DEVICE_TABLE(of, aspeed_gfx_match); +@@ -105,6 +159,7 @@ + + static int aspeed_gfx_setup_mode_config(struct drm_device *drm) + { ++ struct aspeed_gfx *priv = to_aspeed_gfx(drm); + int ret; + + ret = drmm_mode_config_init(drm); +@@ -113,13 +168,63 @@ + + drm->mode_config.min_width = 0; + drm->mode_config.min_height = 0; +- drm->mode_config.max_width = 800; +- drm->mode_config.max_height = 600; ++ ++ switch (priv->flags & CLK_MASK) { ++ case CLK_G6: ++ drm->mode_config.max_width = 1024; ++ drm->mode_config.max_height = 768; ++ break; ++ default: ++ drm->mode_config.max_width = 800; ++ drm->mode_config.max_height = 600; ++ break; ++ } ++ + drm->mode_config.funcs = &aspeed_gfx_mode_config_funcs; + + return ret; + } + ++static irqreturn_t aspeed_host_irq_handler(int irq, void *data) ++{ ++ struct drm_device *drm = data; ++ struct aspeed_gfx *priv = to_aspeed_gfx(drm); ++ u32 reg; ++ ++ regmap_read(priv->scu, priv->pcie_int_reg, ®); ++ ++ if (reg & priv->pcie_int_mask) { ++ if (reg & priv->pcie_int_l_to_h) { ++ dev_dbg(drm->dev, "pcie active.\n"); ++ /*Change the DP back to host*/ ++ if (priv->dp_support) { ++ /*Change the DP back to host*/ ++ regmap_update_bits(priv->dp, DP_SOURCE, DP_CONTROL_FROM_SOC, 0); ++ dev_dbg(drm->dev, "dp set at 0 int L_T_H.\n"); ++ regmap_update_bits(priv->scu, priv->dac_reg, priv->soc_dp_bit, 0); ++ } ++ ++ /*Change the CRT back to host*/ ++ regmap_update_bits(priv->scu, priv->dac_reg, priv->soc_crt_bit, 0); ++ } else if (reg & priv->pcie_int_h_to_l) { ++ dev_dbg(drm->dev, "pcie de-active.\n"); ++ /*Change the DP into host*/ ++ if (priv->dp_support) { ++ /*Change the DP back to soc*/ ++ regmap_update_bits(priv->dp, DP_SOURCE, DP_CONTROL_FROM_SOC, DP_CONTROL_FROM_SOC); ++ dev_dbg(drm->dev, "dp set at 11 int H_T_L.\n"); ++ regmap_update_bits(priv->scu, priv->dac_reg, priv->soc_dp_bit, priv->soc_dp_bit); ++ } ++ ++ /*Change the CRT into soc*/ ++ regmap_update_bits(priv->scu, priv->dac_reg, priv->soc_crt_bit, priv->soc_crt_bit); ++ } ++ return IRQ_HANDLED; ++ } ++ ++ return IRQ_NONE; ++} ++ + static irqreturn_t aspeed_gfx_irq_handler(int irq, void *data) + { + struct drm_device *drm = data; +@@ -137,6 +242,145 @@ + return IRQ_NONE; + } + ++static int aspeed_pcie_active_detect(struct drm_device *drm) ++{ ++ struct aspeed_gfx *priv = to_aspeed_gfx(drm); ++ u32 reg = 0; ++ ++ /* map pcie ep resource */ ++ priv->pcie_ep = syscon_regmap_lookup_by_compatible("aspeed,ast2500-pcie-ep"); ++ if (IS_ERR(priv->pcie_ep)) { ++ priv->pcie_ep = syscon_regmap_lookup_by_compatible("aspeed,ast2600-pcie-phy"); ++ if (IS_ERR(priv->pcie_ep)) { ++ priv->pcie_ep = syscon_regmap_lookup_by_compatible("aspeed,ast2700-pcie-phy"); ++ if (IS_ERR(priv->pcie_ep)) { ++ dev_err(drm->dev, "failed to find pcie_ep regmap\n"); ++ 
return PTR_ERR(priv->pcie_ep); ++ } ++ } ++ } ++ ++ /* check pcie rst status */ ++ regmap_read(priv->pcie_ep, priv->pcie_link_reg, ®); ++ ++ /* host vga is on or not */ ++ if (reg & priv->pcie_link_bit) ++ priv->pcie_active = 0x1; ++ else ++ priv->pcie_active = 0x0; ++ ++ dev_dbg(drm->dev, "pcie_active %x\n", priv->pcie_active); ++ ++ return 0; ++} ++ ++static int aspeed_adaptor_detect(struct drm_device *drm) ++{ ++ struct aspeed_gfx *priv = to_aspeed_gfx(drm); ++ u32 dp_status_offset = 0, reg = 0; ++ ++ switch (priv->flags & CLK_MASK) { ++ case CLK_G6: ++ /* check AST DP is executed or not*/ ++ regmap_read(priv->scu, SCU_DP_STATUS, ®); ++ if (((reg >> 8) & DP_EXECUTE) == DP_EXECUTE) { ++ priv->dp_support = 0x1; ++ ++ priv->dp = syscon_regmap_lookup_by_compatible(DP_26_CP_NAME); ++ if (IS_ERR(priv->dp)) { ++ dev_err(drm->dev, "failed to find DP regmap\n"); ++ return PTR_ERR(priv->dp); ++ } ++ ++ priv->dpmcu = syscon_regmap_lookup_by_compatible(DP_26_MCU_CP_NAME); ++ if (IS_ERR(priv->dpmcu)) { ++ dev_err(drm->dev, "failed to find DP MCU regmap\n"); ++ return PTR_ERR(priv->dpmcu); ++ } ++ ++ /* change the dp setting is coming from soc display */ ++ if (!priv->pcie_active) ++ regmap_update_bits(priv->dp, DP_SOURCE, DP_CONTROL_FROM_SOC, DP_CONTROL_FROM_SOC); ++ } ++ break; ++ case CLK_G7: ++ /* check AST DP is located on PCIE0 or PCIE1 */ ++ regmap_read(priv->scu, priv->dac_reg, ®); ++ ++ if (reg & DP_LOCATE_PCIE1) ++ dp_status_offset = SCU_PCIE1_DP_STATUS; ++ else ++ dp_status_offset = SCU_PCIE0_DP_STATUS; ++ ++ /* check AST DP is executed or not*/ ++ regmap_read(priv->scu, dp_status_offset, ®); ++ if (((reg >> 8) & DP_EXECUTE) == DP_EXECUTE) { ++ priv->dp_support = 0x1; ++ ++ priv->dp = syscon_regmap_lookup_by_compatible(DP_27_CP_NAME); ++ if (IS_ERR(priv->dp)) { ++ dev_err(drm->dev, "failed to find DP regmap\n"); ++ return PTR_ERR(priv->dp); ++ } ++ ++ priv->dpmcu = syscon_regmap_lookup_by_compatible(DP_27_MCU_CP_NAME); ++ if (IS_ERR(priv->dpmcu)) { ++ dev_err(drm->dev, "failed to find DP MCU regmap\n"); ++ return PTR_ERR(priv->dpmcu); ++ } ++ ++ /* change the dp setting is coming from soc display */ ++ regmap_update_bits(priv->dp, DP_SOURCE, DP_CONTROL_FROM_SOC, DP_CONTROL_FROM_SOC); ++ } ++ ++ break; ++ default: ++ priv->dp_support = 0x0; ++ priv->dp = NULL; ++ priv->dpmcu = NULL; ++ break; ++ } ++ return 0; ++} ++ ++static int aspeed_gfx_reset(struct drm_device *drm) ++{ ++ struct platform_device *pdev = to_platform_device(drm->dev); ++ struct aspeed_gfx *priv = to_aspeed_gfx(drm); ++ ++ switch (priv->flags & RESET_MASK) { ++ case RESET_G6: ++ priv->rst_crt = devm_reset_control_get(&pdev->dev, "crt"); ++ if (IS_ERR(priv->rst_crt)) { ++ dev_err(&pdev->dev, ++ "missing or invalid crt reset controller device tree entry"); ++ return PTR_ERR(priv->rst_crt); ++ } ++ reset_control_deassert(priv->rst_crt); ++ ++ priv->rst_engine = devm_reset_control_get(&pdev->dev, "engine"); ++ if (IS_ERR(priv->rst_engine)) { ++ dev_err(&pdev->dev, ++ "missing or invalid engine reset controller device tree entry"); ++ return PTR_ERR(priv->rst_engine); ++ } ++ reset_control_deassert(priv->rst_engine); ++ break; ++ ++ default: ++ priv->rst_crt = devm_reset_control_get_exclusive(&pdev->dev, NULL); ++ if (IS_ERR(priv->rst_crt)) { ++ dev_err(&pdev->dev, ++ "missing or invalid reset controller device tree entry"); ++ return PTR_ERR(priv->rst_crt); ++ } ++ reset_control_deassert(priv->rst_crt); ++ break; ++ } ++ ++ return 0; ++} ++ + static int aspeed_gfx_load(struct drm_device *drm) + { + struct platform_device *pdev 
= to_platform_device(drm->dev); +@@ -144,6 +388,7 @@ + struct device_node *np = pdev->dev.of_node; + const struct aspeed_gfx_config *config; + struct resource *res; ++ u64 dma_mask = 0; + int ret; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); +@@ -160,13 +405,40 @@ + priv->vga_scratch_reg = config->vga_scratch_reg; + priv->throd_val = config->throd_val; + priv->scan_line_max = config->scan_line_max; ++ priv->flags = config->gfx_flags; ++ priv->pcie_int_reg = config->pcie_int_reg; ++ priv->pcie_int_mask = config->pcie_int_mask; ++ priv->pcie_int_l_to_h = config->pcie_int_l_to_h; ++ priv->pcie_int_h_to_l = config->pcie_int_h_to_l; ++ priv->pcie_link_reg = config->pcie_link_reg; ++ priv->pcie_link_bit = config->pcie_link_bit; ++ priv->soc_crt_bit = config->soc_crt_bit; ++ priv->soc_dp_bit = config->soc_dp_bit; ++ ++ /* Add pcie auto detect if the register has been assigned */ ++ if (priv->pcie_int_reg != 0x0) ++ priv->pcie_advance = 1; ++ else ++ priv->pcie_advance = 0; ++ ++ /* Set the DMA mask by addr */ ++ if (priv->flags & ADDR_64) ++ dma_mask = DMA_BIT_MASK(64); ++ else ++ dma_mask = DMA_BIT_MASK(32); + + priv->scu = syscon_regmap_lookup_by_phandle(np, "syscon"); + if (IS_ERR(priv->scu)) { +- priv->scu = syscon_regmap_lookup_by_compatible("aspeed,ast2500-scu"); ++ priv->scu = syscon_regmap_lookup_by_compatible("aspeed,ast2400-scu"); + if (IS_ERR(priv->scu)) { +- dev_err(&pdev->dev, "failed to find SCU regmap\n"); +- return PTR_ERR(priv->scu); ++ priv->scu = syscon_regmap_lookup_by_compatible("aspeed,ast2500-scu"); ++ if (IS_ERR(priv->scu)) { ++ priv->scu = syscon_regmap_lookup_by_compatible("aspeed,ast2600-scu"); ++ if (IS_ERR(priv->scu)) { ++ dev_err(&pdev->dev, "failed to find SCU regmap\n"); ++ return PTR_ERR(priv->scu); ++ } ++ } + } + } + +@@ -177,19 +449,18 @@ + return ret; + } + +- ret = dma_set_mask_and_coherent(drm->dev, DMA_BIT_MASK(32)); ++ ret = dma_set_mask_and_coherent(drm->dev, dma_mask); + if (ret) { + dev_err(&pdev->dev, "failed to set DMA mask: %d\n", ret); + return ret; + } + +- priv->rst = devm_reset_control_get_exclusive(&pdev->dev, NULL); +- if (IS_ERR(priv->rst)) { ++ ret = aspeed_gfx_reset(drm); ++ if (ret) { + dev_err(&pdev->dev, + "missing or invalid reset controller device tree entry"); +- return PTR_ERR(priv->rst); ++ return ret; + } +- reset_control_deassert(priv->rst); + + priv->clk = devm_clk_get(drm->dev, NULL); + if (IS_ERR(priv->clk)) { +@@ -199,6 +470,22 @@ + } + clk_prepare_enable(priv->clk); + ++ if (priv->pcie_advance) { ++ ret = aspeed_pcie_active_detect(drm); ++ if (ret) { ++ dev_err(&pdev->dev, ++ "missing or invalid pcie-ep controller device tree entry"); ++ return ret; ++ } ++ } ++ ++ ret = aspeed_adaptor_detect(drm); ++ if (ret) { ++ dev_err(&pdev->dev, ++ "missing or invalid adaptor controller device tree entry"); ++ return ret; ++ } ++ + /* Sanitize control registers */ + writel(0, priv->base + CRT_CTRL1); + writel(0, priv->base + CRT_CTRL2); +@@ -232,6 +519,23 @@ + return ret; + } + ++ /* install pcie reset detect */ ++ if (of_property_read_bool(np, "pcie-reset-detect") && priv->pcie_advance) { ++ dev_dbg(drm->dev, "hook pcie reset.\n"); ++ ++ /* Special watch the host power up / down */ ++ ret = devm_request_irq(drm->dev, platform_get_irq(pdev, 1), aspeed_host_irq_handler, IRQF_SHARED, "aspeed host active", drm); ++ if (ret < 0) { ++ dev_err(drm->dev, "Failed to install HOST active handler\n"); ++ return ret; ++ } ++ ret = devm_request_irq(drm->dev, platform_get_irq(pdev, 2), aspeed_host_irq_handler, IRQF_SHARED, "aspeed host 
deactivate", drm); ++ if (ret < 0) { ++ dev_err(drm->dev, "Failed to install HOST de-active handler\n"); ++ return ret; ++ } ++ } ++ + drm_mode_config_reset(drm); + + return 0; +@@ -239,6 +543,12 @@ + + static void aspeed_gfx_unload(struct drm_device *drm) + { ++ struct aspeed_gfx *priv = drm->dev_private; ++ ++ /* change the dp setting is coming from host side */ ++ if (priv->dp_support) ++ regmap_update_bits(priv->dp, DP_SOURCE, DP_CONTROL_FROM_SOC, 0); ++ + drm_kms_helper_poll_fini(drm); + } + +diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_out.c b/drivers/gpu/drm/aspeed/aspeed_gfx_out.c +--- a/drivers/gpu/drm/aspeed/aspeed_gfx_out.c 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/gpu/drm/aspeed/aspeed_gfx_out.c 2025-12-23 10:16:20.973035200 +0000 +@@ -10,7 +10,19 @@ + + static int aspeed_gfx_get_modes(struct drm_connector *connector) + { +- return drm_add_modes_noedid(connector, 800, 600); ++ struct aspeed_gfx *priv = container_of(connector, struct aspeed_gfx, connector); ++ int mode_count = 0; ++ ++ switch (priv->flags & CLK_MASK) { ++ case CLK_G6: ++ mode_count = drm_add_modes_noedid(connector, 1024, 768); ++ break; ++ default: ++ mode_count = drm_add_modes_noedid(connector, 800, 600); ++ break; ++ } ++ ++ return mode_count; + } + + static const struct +diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig +--- a/drivers/hwmon/Kconfig 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/hwmon/Kconfig 2025-12-23 10:16:08.059251693 +0000 +@@ -423,6 +423,16 @@ + This driver can also be built as a module. If so, the module + will be called aspeed_g6_pwm_tach. + ++config SENSORS_ASPEED_CHASSIS ++ tristate "ASPEED CHASSIS Driver" ++ depends on ARCH_ASPEED || COMPILE_TEST ++ help ++ This driver provides support for Aspeed ast2600 chassis intruded ++ detect support. ++ ++ To compile this driver as a module, choose M here: the module ++ will be called aspeed-chassis. ++ + config SENSORS_ATXP1 + tristate "Attansic ATXP1 VID controller" + depends on I2C +diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile +--- a/drivers/hwmon/Makefile 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/hwmon/Makefile 2025-12-23 10:16:11.821188606 +0000 +@@ -55,6 +55,7 @@ + obj-$(CONFIG_SENSORS_ASC7621) += asc7621.o + obj-$(CONFIG_SENSORS_ASPEED) += aspeed-pwm-tacho.o + obj-$(CONFIG_SENSORS_ASPEED_G6) += aspeed-g6-pwm-tach.o ++obj-$(CONFIG_SENSORS_ASPEED_CHASSIS) += aspeed-chassis.o + obj-$(CONFIG_SENSORS_ASUS_ROG_RYUJIN) += asus_rog_ryujin.o + obj-$(CONFIG_SENSORS_ATXP1) += atxp1.o + obj-$(CONFIG_SENSORS_AXI_FAN_CONTROL) += axi-fan-control.o +diff --git a/drivers/hwmon/aspeed-chassis.c b/drivers/hwmon/aspeed-chassis.c +--- a/drivers/hwmon/aspeed-chassis.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/hwmon/aspeed-chassis.c 2025-12-23 10:16:20.680040111 +0000 +@@ -0,0 +1,221 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++/* ++ * Copyright (C) 2021 ASPEED Technology Inc. 
++ * ++ * CHASSIS driver for the Aspeed SoC ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* #define USE_INTERRUPTS */ ++/******************************************************************************/ ++union chassis_ctrl_register { ++ u32 value; ++ struct { ++ uint32_t intrusion_status_clear : 1; /*[0]*/ ++ uint32_t intrusion_int_enable : 1; /*[1]*/ ++ uint32_t intrusion_status : 1; /*[2]*/ ++ uint32_t battery_power_good : 1; /*[3]*/ ++ uint32_t chassis_raw_status : 1; /*[4]*/ ++ uint32_t reserved0 : 3; /*[5-7]*/ ++ uint32_t io_power_status_clear : 1; /*[8]*/ ++ uint32_t io_power_int_enable : 1; /*[9]*/ ++ uint32_t core_power_status : 1; /*[10]*/ ++ uint32_t reserved1 : 5; /*[11-15]*/ ++ uint32_t core_power_status_clear : 1; /*[16]*/ ++ uint32_t core_power_int_enable : 1; /*[17]*/ ++ uint32_t io_power_status : 1; /*[18]*/ ++ uint32_t reserved2 : 13; /*[19-31]*/ ++ } fields; ++}; ++ ++struct aspeed_chassis { ++ struct device *dev; ++ void __iomem *base; ++ int irq; ++ /* for hwmon */ ++ const struct attribute_group *groups[2]; ++}; ++ ++static ssize_t ++intrusion_store(struct device *dev, struct device_attribute *attr, ++ const char *buf, size_t count) ++{ ++ unsigned long val; ++ struct aspeed_chassis *chassis = dev_get_drvdata(dev); ++ union chassis_ctrl_register chassis_ctrl; ++ ++ if (kstrtoul(buf, 10, &val) < 0 || val != 0) ++ return -EINVAL; ++ ++ chassis_ctrl.value = readl(chassis->base); ++ chassis_ctrl.fields.intrusion_status_clear = 1; ++ writel(chassis_ctrl.value, chassis->base); ++ chassis_ctrl.fields.intrusion_status_clear = 0; ++ writel(chassis_ctrl.value, chassis->base); ++ return count; ++} ++ ++static ssize_t intrusion_show(struct device *dev, struct device_attribute *attr, ++ char *buf) ++{ ++ struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); ++ int index = sensor_attr->index; ++ struct aspeed_chassis *chassis = dev_get_drvdata(dev); ++ union chassis_ctrl_register chassis_ctrl; ++ u8 ret; ++ ++ chassis_ctrl.value = readl(chassis->base); ++ ++ switch (index) { ++ case 0: ++ ret = chassis_ctrl.fields.core_power_status; ++ break; ++ case 1: ++ ret = chassis_ctrl.fields.io_power_status; ++ break; ++ case 2: ++ ret = chassis_ctrl.fields.intrusion_status; ++ break; ++ } ++ ++ return sprintf(buf, "%d\n", ret); ++} ++ ++static SENSOR_DEVICE_ATTR_RO(core_power, intrusion, 0); ++static SENSOR_DEVICE_ATTR_RO(io_power, intrusion, 1); ++static SENSOR_DEVICE_ATTR_RW(intrusion0_alarm, intrusion, 2); ++ ++static struct attribute *intrusion_dev_attrs[] = { ++ &sensor_dev_attr_core_power.dev_attr.attr, ++ &sensor_dev_attr_io_power.dev_attr.attr, ++ &sensor_dev_attr_intrusion0_alarm.dev_attr.attr, NULL ++}; ++ ++static const struct attribute_group intrusion_dev_group = { ++ .attrs = intrusion_dev_attrs, ++ .is_visible = NULL, ++}; ++ ++#ifdef USE_INTERRUPTS ++static void aspeed_chassis_status_check(struct aspeed_chassis *chassis) ++{ ++ union chassis_ctrl_register chassis_ctrl; ++ ++ chassis_ctrl.value = readl(chassis->base); ++ if (chassis_ctrl.fields.intrusion_status) { ++ dev_info(chassis->dev, "CHASI# pin has been pulled low"); ++ chassis_ctrl.fields.intrusion_status_clear = 1; ++ writel(chassis_ctrl.value, chassis->base); ++ chassis_ctrl.fields.intrusion_status_clear = 0; ++ writel(chassis_ctrl.value, chassis->base); ++ } ++ ++ if (chassis_ctrl.fields.core_power_status) { ++ dev_info(chassis->dev, "Core power has been pulled low"); ++ 
chassis_ctrl.fields.core_power_status_clear = 1; ++ writel(chassis_ctrl.value, chassis->base); ++ chassis_ctrl.fields.core_power_status_clear = 0; ++ writel(chassis_ctrl.value, chassis->base); ++ } ++ ++ if (chassis_ctrl.fields.io_power_status) { ++ dev_info(chassis->dev, "IO power has been pulled low"); ++ chassis_ctrl.fields.io_power_status_clear = 1; ++ writel(chassis_ctrl.value, chassis->base); ++ chassis_ctrl.fields.io_power_status_clear = 0; ++ writel(chassis_ctrl.value, chassis->base); ++ } ++} ++ ++static irqreturn_t aspeed_chassis_isr(int this_irq, void *dev_id) ++{ ++ struct aspeed_chassis *chassis = dev_id; ++ ++ aspeed_chassis_status_check(chassis); ++ return IRQ_HANDLED; ++} ++#endif ++ ++static void aspeed_chassis_int_ctrl(struct aspeed_chassis *chassis, bool ctrl) ++{ ++ union chassis_ctrl_register chassis_ctrl; ++ ++ chassis_ctrl.value = readl(chassis->base); ++ chassis_ctrl.fields.intrusion_int_enable = ctrl; ++ chassis_ctrl.fields.io_power_int_enable = ctrl; ++ chassis_ctrl.fields.core_power_int_enable = ctrl; ++ writel(chassis_ctrl.value, chassis->base); ++} ++ ++static const struct of_device_id aspeed_chassis_of_table[] = { ++ { .compatible = "aspeed,ast2600-chassis" }, ++ {} ++}; ++MODULE_DEVICE_TABLE(of, aspeed_chassis_of_table); ++ ++static int aspeed_chassis_probe(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ struct aspeed_chassis *priv; ++ struct device *hwmon; ++ int __maybe_unused ret; ++ ++ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); ++ if (!priv) ++ return -ENOMEM; ++ ++ priv->dev = dev; ++ priv->base = devm_platform_ioremap_resource(pdev, 0); ++ if (IS_ERR(priv->base)) ++ return PTR_ERR(priv->base); ++#ifdef USE_INTERRUPTS ++ priv->irq = platform_get_irq(pdev, 0); ++ if (priv->irq < 0) { ++ dev_err(dev, "no irq specified\n"); ++ return -ENOENT; ++ } ++ ++ ret = devm_request_irq(dev, priv->irq, aspeed_chassis_isr, 0, ++ dev_name(dev), priv); ++ if (ret) { ++ dev_err(dev, "Chassis Unable to get IRQ"); ++ return ret; ++ } ++ aspeed_chassis_int_ctrl(priv, true); ++#else ++ aspeed_chassis_int_ctrl(priv, false); ++#endif ++ ++ priv->groups[0] = &intrusion_dev_group; ++ priv->groups[1] = NULL; ++ ++ hwmon = devm_hwmon_device_register_with_groups(dev, "aspeed_chassis", ++ priv, priv->groups); ++ ++ return PTR_ERR_OR_ZERO(hwmon); ++} ++ ++static struct platform_driver aspeed_chassis_driver = { ++ .probe = aspeed_chassis_probe, ++ .driver = { ++ .name = KBUILD_MODNAME, ++ .of_match_table = aspeed_chassis_of_table, ++ }, ++}; ++ ++module_platform_driver(aspeed_chassis_driver); ++ ++MODULE_AUTHOR("Billy Tsai"); ++MODULE_DESCRIPTION("ASPEED CHASSIS Driver"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/hwmon/aspeed-g6-pwm-tach.c b/drivers/hwmon/aspeed-g6-pwm-tach.c +--- a/drivers/hwmon/aspeed-g6-pwm-tach.c 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/hwmon/aspeed-g6-pwm-tach.c 2025-12-23 10:16:20.686040010 +0000 +@@ -56,6 +56,7 @@ + #include + #include + #include ++#include + #include + #include + +@@ -137,7 +138,7 @@ + struct reset_control *reset; + unsigned long clk_rate; + bool tach_present[TACH_ASPEED_NR_TACHS]; +- u32 tach_divisor; ++ u32 tach_divisor[TACH_ASPEED_NR_TACHS]; + }; + + static inline struct aspeed_pwm_tach_data * +@@ -282,12 +283,14 @@ + priv->base + TACH_ASPEED_CTRL(tach_ch)); + } + +-static int aspeed_tach_val_to_rpm(struct aspeed_pwm_tach_data *priv, u32 tach_val) ++static int aspeed_tach_val_to_rpm(struct aspeed_pwm_tach_data *priv, ++ u32 tach_val, u8 fan_tach_ch) + { + u64 rpm; + u32 tach_div; + +- 
tach_div = tach_val * priv->tach_divisor * DEFAULT_FAN_PULSE_PR; ++ tach_div = tach_val * priv->tach_divisor[fan_tach_ch] * ++ DEFAULT_FAN_PULSE_PR; + + dev_dbg(priv->dev, "clk %ld, tach_val %d , tach_div %d\n", + priv->clk_rate, tach_val, tach_div); +@@ -308,7 +311,7 @@ + if (!(val & TACH_ASPEED_FULL_MEASUREMENT)) + return 0; + val = FIELD_GET(TACH_ASPEED_VALUE_MASK, val); +- return aspeed_tach_val_to_rpm(priv, val); ++ return aspeed_tach_val_to_rpm(priv, val, fan_tach_ch); + } + + static int aspeed_tach_hwmon_read(struct device *dev, +@@ -345,11 +348,11 @@ + if (!is_power_of_2(val) || (ilog2(val) % 2) || + DIV_TO_REG(val) > 0xb) + return -EINVAL; +- priv->tach_divisor = val; ++ priv->tach_divisor[channel] = val; + reg_val = readl(priv->base + TACH_ASPEED_CTRL(channel)); + reg_val &= ~TACH_ASPEED_CLK_DIV_T_MASK; + reg_val |= FIELD_PREP(TACH_ASPEED_CLK_DIV_T_MASK, +- DIV_TO_REG(priv->tach_divisor)); ++ DIV_TO_REG(priv->tach_divisor[channel])); + writel(reg_val, priv->base + TACH_ASPEED_CTRL(channel)); + break; + default: +@@ -407,7 +410,7 @@ + for (index = 0; index < count; index++) { + ch = tach_ch[index]; + priv->tach_present[ch] = true; +- priv->tach_divisor = DEFAULT_TACH_DIV; ++ priv->tach_divisor[ch] = DEFAULT_TACH_DIV; + + val = readl(priv->base + TACH_ASPEED_CTRL(ch)); + val &= ~(TACH_ASPEED_INVERS_LIMIT | TACH_ASPEED_DEBOUNCE_MASK | +@@ -416,7 +419,7 @@ + val |= (DEBOUNCE_3_CLK << TACH_ASPEED_DEBOUNCE_BIT) | + F2F_EDGES | + FIELD_PREP(TACH_ASPEED_CLK_DIV_T_MASK, +- DIV_TO_REG(priv->tach_divisor)); ++ DIV_TO_REG(priv->tach_divisor[ch])); + writel(val, priv->base + TACH_ASPEED_CTRL(ch)); + + aspeed_tach_ch_enable(priv, ch, true); +@@ -452,6 +455,50 @@ + reset_control_assert(rst); + } + ++static void aspeed_pwm_set_wdt_reload(struct pwm_chip *chip, ++ struct pwm_device *pwm, ++ u64 reload_duty_cycle) ++{ ++ struct aspeed_pwm_tach_data *priv = aspeed_pwm_chip_to_data(chip); ++ u32 hwpwm = pwm->hwpwm, val; ++ ++ val = readl(priv->base + PWM_ASPEED_DUTY_CYCLE(hwpwm)); ++ val &= ~PWM_ASPEED_DUTY_CYCLE_POINT_AS_WDT; ++ val |= FIELD_PREP(PWM_ASPEED_DUTY_CYCLE_POINT_AS_WDT, ++ reload_duty_cycle); ++ writel(val, priv->base + PWM_ASPEED_DUTY_CYCLE(hwpwm)); ++ ++ val = readl(priv->base + PWM_ASPEED_CTRL(hwpwm)); ++ val |= PWM_ASPEED_CTRL_DUTY_LOAD_AS_WDT_ENABLE; ++ writel(val, priv->base + PWM_ASPEED_CTRL(hwpwm)); ++} ++ ++static struct pwm_device * ++aspeed_pwm_xlate(struct pwm_chip *chip, const struct of_phandle_args *args) ++{ ++ struct pwm_device *pwm; ++ ++ /* period in the second cell and flags in the third cell are optional */ ++ if (args->args_count < 1) ++ return ERR_PTR(-EINVAL); ++ ++ pwm = pwm_request_from_chip(chip, args->args[0], NULL); ++ if (IS_ERR(pwm)) ++ return pwm; ++ ++ if (args->args_count > 1) ++ pwm->args.period = args->args[1]; ++ ++ pwm->args.polarity = PWM_POLARITY_NORMAL; ++ if (args->args_count > 2 && args->args[2] & PWM_POLARITY_INVERTED) ++ pwm->args.polarity = PWM_POLARITY_INVERSED; ++ ++ if (args->args_count > 3 && args->args[3] < U8_MAX) ++ aspeed_pwm_set_wdt_reload(chip, pwm, args->args[3]); ++ ++ return pwm; ++} ++ + static int aspeed_pwm_tach_probe(struct platform_device *pdev) + { + struct device *dev = &pdev->dev, *hwmon; +@@ -493,6 +540,8 @@ + pwmchip_set_drvdata(chip, priv); + chip->ops = &aspeed_pwm_ops; + ++ chip->of_xlate = aspeed_pwm_xlate; ++ + ret = devm_pwmchip_add(dev, chip); + if (ret) + return dev_err_probe(dev, ret, "Failed to add PWM chip\n"); +@@ -528,6 +577,9 @@ + { + .compatible = "aspeed,ast2600-pwm-tach", + }, ++ { ++ .compatible = 
"aspeed,ast2700-pwm-tach", ++ }, + {}, + }; + MODULE_DEVICE_TABLE(of, aspeed_pwm_tach_match); +diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig +--- a/drivers/i2c/busses/Kconfig 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/i2c/busses/Kconfig 2025-12-23 10:16:07.612259189 +0000 +@@ -411,6 +411,17 @@ + This driver can also be built as a module. If so, the module + will be called i2c-altera. + ++config I2C_AST2600 ++ tristate "Aspeed I2C v2 Controller" ++ depends on ARCH_ASPEED || COMPILE_TEST ++ select I2C_SMBUS ++ help ++ If you say yes to this option, support will be included for the ++ Aspeed I2C controller with new register set. ++ ++ This driver can also be built as a module. If so, the module ++ will be called i2c-ast2600. ++ + config I2C_ASPEED + tristate "Aspeed I2C Controller" + depends on ARCH_ASPEED || COMPILE_TEST +diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile +--- a/drivers/i2c/busses/Makefile 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/i2c/busses/Makefile 2025-12-23 10:16:11.163199640 +0000 +@@ -39,6 +39,7 @@ + obj-$(CONFIG_I2C_ALTERA) += i2c-altera.o + obj-$(CONFIG_I2C_AMD_MP2) += i2c-amd-mp2-pci.o i2c-amd-mp2-plat.o + obj-$(CONFIG_I2C_ASPEED) += i2c-aspeed.o ++obj-$(CONFIG_I2C_AST2600) += i2c-ast2600.o + obj-$(CONFIG_I2C_AT91) += i2c-at91.o + i2c-at91-objs := i2c-at91-core.o i2c-at91-master.o + ifeq ($(CONFIG_I2C_AT91_SLAVE_EXPERIMENTAL),y) +diff --git a/drivers/i2c/busses/i2c-ast2600.c b/drivers/i2c/busses/i2c-ast2600.c +--- a/drivers/i2c/busses/i2c-ast2600.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/i2c/busses/i2c-ast2600.c 2025-12-23 10:16:20.640040781 +0000 +@@ -0,0 +1,2421 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * ASPEED AST2600 new register set I2C controller driver ++ * ++ * Copyright (C) ASPEED Technology Inc. 
++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define AST2600_I2CG_ISR 0x00 ++#define AST2600_I2CG_SLAVE_ISR 0x04 ++#define AST2600_I2CG_OWNER 0x08 ++#define AST2600_I2CG_CTRL 0x0C ++#define AST2600_I2CG_CLK_DIV_CTRL 0x10 ++ ++#define AST2600_I2CG_SLAVE_PKT_NAK BIT(4) ++#define AST2600_I2CG_M_S_SEPARATE_INTR BIT(3) ++#define AST2600_I2CG_CTRL_NEW_REG BIT(2) ++#define AST2600_I2CG_CTRL_NEW_CLK_DIV BIT(1) ++#define AST2600_GLOBAL_INIT \ ++ (AST2600_I2CG_CTRL_NEW_REG | AST2600_I2CG_CTRL_NEW_CLK_DIV) ++/* ++ * APB clk : 100Mhz ++ * div : scl : baseclk [APB/((div/2) + 1)] : tBuf [1/bclk * 16] ++ * I2CG10[31:24] base clk4 for i2c auto recovery timeout counter (0xC6) ++ * I2CG10[23:16] base clk3 for Standard-mode (100Khz) min tBuf 4.7us ++ * 0x3c : 100.8Khz : 3.225Mhz : 4.96us ++ * 0x3d : 99.2Khz : 3.174Mhz : 5.04us ++ * 0x3e : 97.65Khz : 3.125Mhz : 5.12us ++ * 0x40 : 97.75Khz : 3.03Mhz : 5.28us ++ * 0x41 : 99.5Khz : 2.98Mhz : 5.36us (default) ++ * I2CG10[15:8] base clk2 for Fast-mode (400Khz) min tBuf 1.3us ++ * 0x12 : 400Khz : 10Mhz : 1.6us ++ * I2CG10[7:0] base clk1 for Fast-mode Plus (1Mhz) min tBuf 0.5us ++ * 0x08 : 1Mhz : 20Mhz : 0.8us ++ */ ++#define AST2600_I2CCG_DIV_CTRL 0xC6411208 ++#define AST2700_I2CCG_DIV_CTRL 0xC6220904 ++#define AST2700_MIN_AC_TIMING 12000 ++ ++/* 0x00 : I2CC Controller/Target Function Control Register */ ++#define AST2600_I2CC_FUN_CTRL 0x00 ++#define AST2600_I2CC_SLAVE_ADDR_RX_EN BIT(20) ++#define AST2600_I2CC_MASTER_RETRY_MASK GENMASK(19, 18) ++#define AST2600_I2CC_MASTER_RETRY(x) (((x) & GENMASK(1, 0)) << 18) ++#define AST2600_I2CC_BUS_AUTO_RELEASE BIT(17) ++#define AST2600_I2CC_M_SDA_LOCK_EN BIT(16) ++#define AST2600_I2CC_MULTI_MASTER_DIS BIT(15) ++#define AST2600_I2CC_M_SCL_DRIVE_EN BIT(14) ++#define AST2600_I2CC_MSB_STS BIT(9) ++#define AST2600_I2CC_SDA_DRIVE_1T_EN BIT(8) ++#define AST2600_I2CC_M_SDA_DRIVE_1T_EN BIT(7) ++#define AST2600_I2CC_M_HIGH_SPEED_EN BIT(6) ++#define AST2700_I2CC_MANUAL_DEBOUNCE GENMASK(5, 4) ++/* reserver 5 : 2 */ ++#define AST2600_I2CC_SLAVE_EN BIT(1) ++#define AST2600_I2CC_MASTER_EN BIT(0) ++ ++/* 0x04 : I2CC Controller/Target Clock and AC Timing Control Register #1 */ ++#define AST2600_I2CC_AC_TIMING 0x04 ++#define AST2600_I2CC_TTIMEOUT(x) (((x) & GENMASK(4, 0)) << 24) ++#define AST2700_I2CC_TTIMEOUT(x) (((x) & GENMASK(5, 0)) << 24) ++#define AST2600_I2CC_TCKHIGHMIN(x) (((x) & GENMASK(3, 0)) << 20) ++#define AST2600_I2CC_TCKHIGH(x) (((x) & GENMASK(3, 0)) << 16) ++#define AST2600_I2CC_TCKLOW(x) (((x) & GENMASK(3, 0)) << 12) ++#define AST2600_I2CC_THDDAT(x) (((x) & GENMASK(1, 0)) << 10) ++#define AST2600_I2CC_TOUTBASECLK(x) (((x) & GENMASK(1, 0)) << 8) ++#define AST2600_I2CC_TBASECLK(x) ((x) & GENMASK(3, 0)) ++#define AST2600_I2CC_AC_TIMING_MASK GENMASK(23, 0) ++ ++/* 0x08 : I2CC Controller/Target Transmit/Receive Byte Buffer Register */ ++#define AST2600_I2CC_STS_AND_BUFF 0x08 ++#define AST2600_I2CC_TX_DIR_MASK GENMASK(31, 29) ++#define AST2600_I2CC_SDA_OE BIT(28) ++#define AST2600_I2CC_SDA_O BIT(27) ++#define AST2600_I2CC_SCL_OE BIT(26) ++#define AST2600_I2CC_SCL_O BIT(25) ++ ++#define AST2600_I2CC_SCL_LINE_STS BIT(18) ++#define AST2600_I2CC_SDA_LINE_STS BIT(17) ++#define AST2600_I2CC_BUS_BUSY_STS BIT(16) ++ ++#define AST2600_I2CC_GET_RX_BUFF(x) (((x) >> 8) & GENMASK(7, 0)) ++ ++/* 0x0C : I2CC Controller/Target Pool Buffer Control Register */ 
++#define AST2600_I2CC_BUFF_CTRL 0x0C ++#define AST2600_I2CC_GET_RX_BUF_LEN(x) (((x) & GENMASK(29, 24)) >> 24) ++#define AST2600_I2CC_SET_RX_BUF_LEN(x) (((((x) - 1) & GENMASK(4, 0)) << 16) | BIT(0)) ++#define AST2600_I2CC_SET_TX_BUF_LEN(x) (((((x) - 1) & GENMASK(4, 0)) << 8) | BIT(0)) ++#define AST2600_I2CC_GET_TX_BUF_LEN(x) ((((x) & GENMASK(12, 8)) >> 8) + 1) ++ ++/* 0x10 : I2CM Controller Interrupt Control Register */ ++#define AST2600_I2CM_IER 0x10 ++/* 0x14 : I2CM Controller Interrupt Status Register : WC */ ++#define AST2600_I2CM_ISR 0x14 ++ ++#define AST2600_I2CM_ISR_MASK GENMASK(31, 21) ++#define AST2600_I2CM_SW_ISR_MASK GENMASK(31, 19) ++ ++#define AST2600_I2CM_PKT_TIMEOUT BIT(18) ++#define AST2600_I2CM_PKT_ERROR BIT(17) ++#define AST2600_I2CM_PKT_DONE BIT(16) ++ ++#define AST2600_I2CM_BUS_RECOVER_FAIL BIT(15) ++#define AST2600_I2CM_SDA_DL_TO BIT(14) ++#define AST2600_I2CM_BUS_RECOVER BIT(13) ++#define AST2600_I2CM_SMBUS_ALT BIT(12) ++#define AST2700_I2CM_ABNORMAL_ACTION BIT(8) ++ ++#define AST2600_I2CM_SCL_LOW_TO BIT(6) ++#define AST2600_I2CM_ABNORMAL BIT(5) ++#define AST2600_I2CM_NORMAL_STOP BIT(4) ++#define AST2600_I2CM_ARBIT_LOSS BIT(3) ++#define AST2600_I2CM_RX_DONE BIT(2) ++#define AST2600_I2CM_TX_NAK BIT(1) ++#define AST2600_I2CM_TX_ACK BIT(0) ++ ++/* 0x18 : I2CM Controller Command/Status Register */ ++#define AST2600_I2CM_CMD_STS 0x18 ++#define AST2600_I2CM_PKT_ADDR(x) (((x) & GENMASK(6, 0)) << 24) ++#define AST2600_I2CM_PKT_EN BIT(16) ++#define AST2600_I2CM_SDA_OE_OUT_DIR BIT(15) ++#define AST2600_I2CM_SDA_O_OUT_DIR BIT(14) ++#define AST2600_I2CM_SCL_OE_OUT_DIR BIT(13) ++#define AST2600_I2CM_SCL_O_OUT_DIR BIT(12) ++#define AST2600_I2CM_RECOVER_CMD_EN BIT(11) ++ ++#define AST2600_I2CM_RX_DMA_EN BIT(9) ++#define AST2600_I2CM_TX_DMA_EN BIT(8) ++/* Command Bit */ ++#define AST2600_I2CM_RX_BUFF_EN BIT(7) ++#define AST2600_I2CM_TX_BUFF_EN BIT(6) ++#define AST2600_I2CM_STOP_CMD BIT(5) ++#define AST2600_I2CM_RX_CMD_LAST BIT(4) ++#define AST2600_I2CM_RX_CMD BIT(3) ++ ++#define AST2600_I2CM_TX_CMD BIT(1) ++#define AST2600_I2CM_START_CMD BIT(0) ++ ++/* 0x1C : I2CM Controller DMA Transfer Length Register */ ++#define AST2600_I2CM_DMA_LEN 0x1C ++/* Controller Tx Rx support length 1 ~ 4096 */ ++#define AST2600_I2CM_SET_RX_DMA_LEN(x) ((((x) & GENMASK(11, 0)) << 16) | BIT(31)) ++#define AST2600_I2CM_SET_TX_DMA_LEN(x) (((x) & GENMASK(11, 0)) | BIT(15)) ++ ++/* 0x20 : I2CS Target Interrupt Control Register */ ++#define AST2600_I2CS_IER 0x20 ++/* 0x24 : I2CS Target Interrupt Status Register */ ++#define AST2600_I2CS_ISR 0x24 ++ ++#define AST2600_I2CS_ADDR_INDICATE_MASK GENMASK(31, 30) ++#define AST2600_I2CS_SLAVE_PENDING BIT(29) ++#define AST2600_I2CS_SADDR_PENDING BIT(28) ++ ++#define AST2600_I2CS_WAIT_TX_DMA BIT(25) ++#define AST2600_I2CS_WAIT_RX_DMA BIT(24) ++ ++#define AST2600_I2CS_ADDR3_NAK BIT(22) ++#define AST2600_I2CS_ADDR2_NAK BIT(21) ++#define AST2600_I2CS_ADDR1_NAK BIT(20) ++ ++#define AST2600_I2CS_ADDR_NAK_MASK GENMASK(22, 20) ++#define AST2600_I2CS_ADDR_MASK GENMASK(19, 18) ++#define AST2600_I2CS_GET_TARGET(x) (((x) >> 30) & 0x3) ++#define AST2600_I2CS_PKT_ERROR BIT(17) ++#define AST2600_I2CS_PKT_DONE BIT(16) ++#define AST2600_I2CS_INACTIVE_TO BIT(15) ++ ++#define AST2600_I2CS_SLAVE_MATCH BIT(7) ++#define AST2600_I2CS_ABNOR_STOP BIT(5) ++#define AST2600_I2CS_STOP BIT(4) ++#define AST2600_I2CS_RX_DONE_NAK BIT(3) ++#define AST2600_I2CS_RX_DONE BIT(2) ++#define AST2600_I2CS_TX_NAK BIT(1) ++#define AST2600_I2CS_TX_ACK BIT(0) ++ ++/* 0x28 : I2CS Target CMD/Status Register */ 
++#define AST2600_I2CS_CMD_STS 0x28 ++#define AST2600_I2CS_ACTIVE_ALL GENMASK(18, 17) ++#define AST2600_I2CS_PKT_MODE_EN BIT(16) ++#define AST2600_I2CS_AUTO_NAK_NOADDR BIT(15) ++#define AST2600_I2CS_AUTO_NAK_EN BIT(14) ++ ++#define AST2600_I2CS_ALT_EN BIT(10) ++#define AST2600_I2CS_RX_DMA_EN BIT(9) ++#define AST2600_I2CS_TX_DMA_EN BIT(8) ++#define AST2600_I2CS_RX_BUFF_EN BIT(7) ++#define AST2600_I2CS_TX_BUFF_EN BIT(6) ++#define AST2600_I2CS_RX_CMD_LAST BIT(4) ++ ++#define AST2600_I2CS_TX_CMD BIT(2) ++ ++#define AST2600_I2CS_DMA_LEN 0x2C ++ ++/* Target Tx Rx support length 1 ~ 4096 */ ++#define AST2600_I2CS_SET_RX_DMA_LEN(x) (((((x) - 1) & GENMASK(11, 0)) << 16) | BIT(31)) ++#define AST2600_I2CS_SET_TX_DMA_LEN(x) ((((x) - 1) & GENMASK(11, 0)) | BIT(15)) ++ ++/* I2CM Controller DMA Tx Buffer Register */ ++#define AST2600_I2CM_TX_DMA 0x30 ++/* I2CM Controller DMA Rx Buffer Register */ ++#define AST2600_I2CM_RX_DMA 0x34 ++/* I2CS Target DMA Tx Buffer Register */ ++#define AST2600_I2CS_TX_DMA 0x38 ++/* I2CS Target DMA Rx Buffer Register */ ++#define AST2600_I2CS_RX_DMA 0x3C ++ ++/* I2CM Controller DMA Rx Buffer High part Register */ ++#define AST2600_I2CM_TX_DMA_H 0x60 ++/* I2CM Controller DMA Rx Buffer High part Register */ ++#define AST2600_I2CM_RX_DMA_H 0x64 ++/* I2CS Target DMA Tx Buffer High part Register */ ++#define AST2600_I2CS_TX_DMA_H 0x68 ++/* I2CS Target DMA Rx Buffer High part Register */ ++#define AST2600_I2CS_RX_DMA_H 0x6C ++ ++#define AST2600_I2CS_ADDR_CTRL 0x40 ++ ++#define AST2600_I2CS_ADDR3_MASK GENMASK(22, 16) ++#define AST2600_I2CS_ADDR2_MASK GENMASK(14, 8) ++#define AST2600_I2CS_ADDR1_MASK GENMASK(6, 0) ++ ++#define AST2600_I2CM_DMA_LEN_STS 0x48 ++#define AST2600_I2CS_DMA_LEN_STS 0x4C ++ ++#define AST2600_I2C_GET_TX_DMA_LEN(x) ((x) & GENMASK(12, 0)) ++#define AST2600_I2C_GET_RX_DMA_LEN(x) (((x) & GENMASK(28, 16)) >> 16) ++ ++/* 0x40 : Target Device Address Register */ ++#define AST2600_I2CS_ADDR3_ENABLE BIT(23) ++#define AST2600_I2CS_ADDR3(x) ((x) << 16) ++#define AST2600_I2CS_ADDR2_ENABLE BIT(15) ++#define AST2600_I2CS_ADDR2(x) ((x) << 8) ++#define AST2600_I2CS_ADDR1_ENABLE BIT(7) ++#define AST2600_I2CS_ADDR1(x) (x) ++ ++/* 0x74 : Target Device Address Register */ ++#define MSIC_CONFIG_ACTIMING1 0x74 ++#define MSIC_I2C_SET_TIMEOUT(s, m) (((s) << 16) | (m)) ++ ++/* 0x78 : Misc status */ ++#define MSIC_STATUS 0x78 ++ ++/* 0x84 : Byte data log */ ++#define BYTE_DATA_LOG 0x84 ++ ++#define AST2700_I2CC_GET_BUFF(x) ((x) & GENMASK(7, 0)) ++ ++/* 0x8c : Target sirq log */ ++#define AST2700_I2CC_SIRQ_LOG 0x8c ++#define SLAVE_ADDR_SHIFT 8 ++#define SLAVE_ADDR_MASK GENMASK(15, 8) ++#define SADDR_NACK BIT(5) ++#define SLAVE_PKT_DONE BIT(4) ++#define SADDR_HIT BIT(3) ++#define SRX_DONE BIT(2) ++#define STX_DONE BIT(1) ++#define SLAVE_STOP BIT(0) ++ ++/* 0x9c : Misc_2 Debounce Setting */ ++#define MSIC2_CONFIG 0x9C ++#define AST2700_DEBOUNCE_MASK GENMASK(7, 0) ++#define AST2700_DEBOUNCE_LEVEL_MAX 0x20 ++#define AST2700_DEBOUNCE_LEVEL_MIN 0x2 ++ ++#define I2C_TARGET_MSG_BUF_SIZE 4096 ++ ++#define AST2600_I2C_DMA_SIZE 4096 ++ ++#define CONTROLLER_TRIGGER_LAST_STOP (AST2600_I2CM_RX_CMD_LAST | AST2600_I2CM_STOP_CMD) ++#define TARGET_TRIGGER_CMD (AST2600_I2CS_ACTIVE_ALL | AST2600_I2CS_PKT_MODE_EN) ++ ++#define AST2600_I2C_TIMEOUT_CLK 0x1 ++#define AST2700_I2C_TIMEOUT_CLK 0x3 ++ ++#define AST2600_I2C_TARGET_COUNT 0x3 ++ ++enum xfer_mode { ++ BYTE_MODE, ++ BUFF_MODE, ++ DMA_MODE, ++}; ++ ++enum i2c_version { ++ AST2600, ++ AST2700, ++}; ++ ++struct i2c_divisor { ++ u32 baseclk_idx; ++ u32 
divisor; ++ u8 baseclk_limit; ++}; ++ ++struct ast2600_i2c_bus { ++ struct i2c_adapter adap; ++ struct device *dev; ++ void __iomem *reg_base; ++ struct regmap *global_regs; ++ struct reset_control *rst; ++ struct clk *clk; ++ struct i2c_timings timing_info; ++ struct completion cmd_complete; ++ struct i2c_msg *msgs; ++ u8 *controller_safe_buf; ++ dma_addr_t controller_dma_addr; ++ u32 apb_clk; ++ struct i2c_divisor clk_divisor; ++ u32 timeout; ++ int irq; ++ int cmd_err; ++ int msgs_index; ++ int msgs_count; ++ int controller_xfer_cnt; ++ size_t buf_index; ++ size_t buf_size; ++ enum xfer_mode mode; ++ enum i2c_version version; ++ bool multi_master; ++ u32 debounce_level; ++ /* Buffer mode */ ++ void __iomem *buf_base; ++ /* smbus alert */ ++ bool alert_enable; ++ struct i2c_smbus_alert_setup alert_data; ++ struct i2c_client *ara; ++#if IS_ENABLED(CONFIG_I2C_SLAVE) ++ int target_operate; ++ int previous_idx; ++ unsigned char *target_dma_buf; ++ dma_addr_t target_dma_addr; ++ u8 target_attached; ++ struct i2c_client *multi_target[AST2600_I2C_TARGET_COUNT]; ++ struct i2c_client *target; ++#endif ++}; ++ ++static void ast2600_i2c_ac_timing_config(struct ast2600_i2c_bus *i2c_bus) ++{ ++ unsigned long base_clk[16]; ++ int baseclk_idx = 0; ++ int divisor = 0; ++ u32 clk_div_reg; ++ u32 scl_low; ++ u32 scl_high; ++ u32 data; ++ ++ regmap_read(i2c_bus->global_regs, AST2600_I2CG_CLK_DIV_CTRL, &clk_div_reg); ++ ++ for (int i = 0; i < ARRAY_SIZE(base_clk); i++) { ++ if (i == 0) ++ base_clk[i] = i2c_bus->apb_clk; ++ else if (i < 5) ++ base_clk[i] = (i2c_bus->apb_clk * 2) / ++ (((clk_div_reg >> ((i - 1) * 8)) & GENMASK(7, 0)) + 2); ++ else ++ base_clk[i] = base_clk[4] >> (i - 4); ++ if ((base_clk[i] / i2c_bus->timing_info.bus_freq_hz) <= 32) { ++ baseclk_idx = i; ++ divisor = DIV_ROUND_UP(base_clk[i], i2c_bus->timing_info.bus_freq_hz); ++ break; ++ } ++ } ++ ++ baseclk_idx = min(baseclk_idx, 15); ++ divisor = min(divisor, 32); ++ scl_low = min(divisor * 9 / 16 - 1, 15); ++ scl_high = (divisor - scl_low - 2) & GENMASK(3, 0); ++ data = (scl_high - 1) << 20 | scl_high << 16 | scl_low << 12 | baseclk_idx; ++ ++ if (i2c_bus->timeout) { ++ i2c_bus->timeout = min(i2c_bus->timeout, 31); ++ data |= AST2600_I2CC_TTIMEOUT(i2c_bus->timeout); ++ data |= AST2600_I2CC_TOUTBASECLK(AST2600_I2C_TIMEOUT_CLK); ++ } ++ ++ writel(data, i2c_bus->reg_base + AST2600_I2CC_AC_TIMING); ++} ++ ++static void ast2700_i2c_ac_timing_config(struct ast2600_i2c_bus *i2c_bus) ++{ ++ unsigned long base_clk; ++ int baseclk_idx = 0; ++ int divisor = 0; ++ u32 clk_div_reg; ++ u32 scl_low; ++ u32 scl_high; ++ u32 data; ++ u8 divid_term = 0; ++ ++ /* The i2c minmum ac-timing is 12KHz */ ++ if (i2c_bus->timing_info.bus_freq_hz < AST2700_MIN_AC_TIMING) { ++ dev_err(i2c_bus->dev, "The frequency could not be lower than 12KHz.\n"); ++ i2c_bus->timing_info.bus_freq_hz = AST2700_MIN_AC_TIMING; ++ } ++ ++ regmap_read(i2c_bus->global_regs, AST2600_I2CG_CLK_DIV_CTRL, &clk_div_reg); ++ ++ /* Find the most used ac-timing */ ++ for (int i = 0; i < 3; i++) { ++ divid_term = ((clk_div_reg >> (i << 3)) & GENMASK(7, 0)); ++ base_clk = (i2c_bus->apb_clk) / (divid_term + 1); ++ if ((base_clk / i2c_bus->timing_info.bus_freq_hz) <= 32) { ++ baseclk_idx = divid_term; ++ divisor = DIV_ROUND_UP(base_clk, i2c_bus->timing_info.bus_freq_hz); ++ break; ++ } ++ } ++ ++ /* Can't find a ac-timing then search a fitting one */ ++ if (baseclk_idx == 0) { ++ for (int i = 0; i < 0x100; i++) { ++ base_clk = (i2c_bus->apb_clk) / (i + 1); ++ if ((base_clk / 
i2c_bus->timing_info.bus_freq_hz) <= 32) { ++ baseclk_idx = i; ++ divisor = DIV_ROUND_UP(base_clk, i2c_bus->timing_info.bus_freq_hz); ++ break; ++ } ++ } ++ } ++ ++ baseclk_idx = min(baseclk_idx, 0xff); ++ divisor = min(divisor, 32); ++ scl_low = min((DIV_ROUND_UP(divisor * 9, 16)) - 1, 15); ++ scl_high = (divisor - scl_low - 2) & GENMASK(3, 0); ++ data = (scl_high - 1) << 20 | scl_high << 16 | scl_low << 12 | baseclk_idx; ++ ++ if (i2c_bus->timeout) { ++ i2c_bus->timeout = min(i2c_bus->timeout, 255); ++ writel(MSIC_I2C_SET_TIMEOUT(i2c_bus->timeout, 0), ++ i2c_bus->reg_base + MSIC_CONFIG_ACTIMING1); ++ /* timeout_base set as 1ms */ ++ data |= AST2600_I2CC_TOUTBASECLK(AST2700_I2C_TIMEOUT_CLK); ++ } ++ ++ writel(data, i2c_bus->reg_base + AST2600_I2CC_AC_TIMING); ++} ++ ++static int ast2600_i2c_recover_bus(struct ast2600_i2c_bus *i2c_bus) ++{ ++ u32 state = readl(i2c_bus->reg_base + AST2600_I2CC_STS_AND_BUFF); ++ int ret = 0; ++ int r; ++ ++ dev_dbg(i2c_bus->dev, "%d-bus recovery bus [%x]\n", i2c_bus->adap.nr, state); ++ ++ reinit_completion(&i2c_bus->cmd_complete); ++ i2c_bus->cmd_err = 0; ++ ++ /* Check 0x14's SDA and SCL status */ ++ state = readl(i2c_bus->reg_base + AST2600_I2CC_STS_AND_BUFF); ++ if (!(state & AST2600_I2CC_SDA_LINE_STS) && (state & AST2600_I2CC_SCL_LINE_STS)) { ++ writel(AST2600_I2CM_RECOVER_CMD_EN, i2c_bus->reg_base + AST2600_I2CM_CMD_STS); ++ r = wait_for_completion_timeout(&i2c_bus->cmd_complete, i2c_bus->adap.timeout); ++ if (r == 0) { ++ dev_dbg(i2c_bus->dev, "recovery timed out\n"); ++ return -ETIMEDOUT; ++ } else if (i2c_bus->cmd_err) { ++ dev_dbg(i2c_bus->dev, "recovery error\n"); ++ ret = -EPROTO; ++ } ++ } ++ ++ /* Recovery done */ ++ state = readl(i2c_bus->reg_base + AST2600_I2CC_STS_AND_BUFF); ++ if (state & AST2600_I2CC_BUS_BUSY_STS) { ++ dev_dbg(i2c_bus->dev, "Can't recover bus [%x]\n", state); ++ ret = -EPROTO; ++ } ++ ++ return ret; ++} ++ ++#if IS_ENABLED(CONFIG_I2C_SLAVE) ++static void ast2700_i2c_get_target(struct ast2600_i2c_bus *i2c_bus, u8 addr) ++{ ++ u8 i = 0; ++ bool target_find = false; ++ ++ /* find target by address */ ++ for (i = 0; i < AST2600_I2C_TARGET_COUNT; i++) { ++ if (i2c_bus->multi_target[i]) { ++ if (i2c_bus->multi_target[i]->addr == addr) { ++ dev_dbg(i2c_bus->dev, "address [%x] on %d\n", addr, i); ++ i2c_bus->target = i2c_bus->multi_target[i]; ++ target_find = true; ++ } ++ } ++ } ++ ++ if (!target_find) ++ dev_err(i2c_bus->dev, "address [%x] could not find\n", addr); ++} ++ ++static void ast2700_i2c_target_packet_dma_irq(struct ast2600_i2c_bus *i2c_bus, u32 isr) ++{ ++ int target_rx_len = 0; ++ u32 cmd = 0; ++ u8 value; ++ int i; ++ u32 sirq_log; ++ u32 sts; ++ ++ writel(AST2600_I2CS_SADDR_PENDING | AST2600_I2CS_WAIT_TX_DMA | AST2600_I2CS_WAIT_RX_DMA, ++ i2c_bus->reg_base + AST2600_I2CS_ISR); ++ isr = readl(i2c_bus->reg_base + AST2600_I2CS_ISR); ++ ++ sts = isr & ~(AST2600_I2CS_SLAVE_PENDING | AST2600_I2CS_ADDR_NAK_MASK); ++ /* Handle i2c target timeout condition */ ++ if (AST2600_I2CS_INACTIVE_TO & sts) { ++ dev_dbg(i2c_bus->dev, "The target timeout occurs isr: 0x%08x.\n", isr); ++ /* Reset timeout counter */ ++ u32 ac_timing = readl(i2c_bus->reg_base + AST2600_I2CC_AC_TIMING) & ++ AST2600_I2CC_AC_TIMING_MASK; ++ ++ writel(ac_timing, i2c_bus->reg_base + AST2600_I2CC_AC_TIMING); ++ ac_timing |= AST2700_I2CC_TTIMEOUT(i2c_bus->timeout); ++ writel(ac_timing, i2c_bus->reg_base + AST2600_I2CC_AC_TIMING); ++ cmd = TARGET_TRIGGER_CMD | AST2600_I2CS_RX_DMA_EN; ++ writel(AST2600_I2CS_SET_RX_DMA_LEN(I2C_TARGET_MSG_BUF_SIZE), ++ 
i2c_bus->reg_base + AST2600_I2CS_DMA_LEN); ++ writel(cmd, i2c_bus->reg_base + AST2600_I2CS_CMD_STS); ++ /* clear sirq log */ ++ while ((sirq_log = readl(i2c_bus->reg_base + AST2700_I2CC_SIRQ_LOG))) { ++ /* assign the target client */ ++ if (sirq_log & SADDR_HIT) { ++ if (!i2c_bus->target) ++ ast2700_i2c_get_target(i2c_bus, sirq_log >> SLAVE_ADDR_SHIFT); ++ } ++ }; ++ writel(isr, i2c_bus->reg_base + AST2600_I2CS_ISR); ++ if (i2c_bus->target) { ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_STOP, &value); ++ i2c_bus->target = NULL; ++ } ++ return; ++ } ++ ++ if (AST2600_I2CS_ABNOR_STOP & sts) { ++ dev_err(i2c_bus->dev, "The target abnormal protocol occurs isr: 0x%08x.\n", isr); ++ cmd = TARGET_TRIGGER_CMD | AST2600_I2CS_RX_DMA_EN; ++ writel(AST2600_I2CS_SET_RX_DMA_LEN(I2C_TARGET_MSG_BUF_SIZE), ++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN); ++ writel(cmd, i2c_bus->reg_base + AST2600_I2CS_CMD_STS); ++ /* clear sirq log */ ++ while ((sirq_log = readl(i2c_bus->reg_base + AST2700_I2CC_SIRQ_LOG))) { ++ /* assign the target client */ ++ if (sirq_log & SADDR_HIT) { ++ if (!i2c_bus->target) ++ ast2700_i2c_get_target(i2c_bus, sirq_log >> SLAVE_ADDR_SHIFT); ++ } ++ }; ++ writel(isr, i2c_bus->reg_base + AST2600_I2CS_ISR); ++ if (i2c_bus->target) { ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_STOP, &value); ++ i2c_bus->target = NULL; ++ } ++ return; ++ } ++ ++ sts &= ~(AST2600_I2CS_PKT_DONE | AST2600_I2CS_PKT_ERROR); ++ ++ switch (sts) { ++ case AST2600_I2CS_SADDR_PENDING | AST2600_I2CS_WAIT_RX_DMA | ++ AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_RX_DONE | AST2600_I2CS_STOP: ++ writel(AST2600_I2CS_SLAVE_MATCH, i2c_bus->reg_base + AST2600_I2CS_ISR); ++ isr = readl(i2c_bus->reg_base + AST2600_I2CS_ISR); ++ sirq_log = readl(i2c_bus->reg_base + AST2700_I2CC_SIRQ_LOG); ++ if (!i2c_bus->target) ++ ast2700_i2c_get_target(i2c_bus, sirq_log >> SLAVE_ADDR_SHIFT); ++ if (i2c_bus->target) { ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_WRITE_REQUESTED, ++ &i2c_bus->target_dma_buf[0]); ++ } ++ sirq_log = readl(i2c_bus->reg_base + AST2700_I2CC_SIRQ_LOG); ++ target_rx_len = AST2600_I2C_GET_RX_DMA_LEN(readl(i2c_bus->reg_base + ++ AST2600_I2CS_DMA_LEN_STS)); ++ for (i = 0; i < target_rx_len; i++) { ++ if (i2c_bus->target) { ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_WRITE_RECEIVED, ++ &i2c_bus->target_dma_buf[i]); ++ } ++ } ++ if (i2c_bus->target) ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_STOP, &value); ++ i2c_bus->target = NULL; ++ sirq_log = readl(i2c_bus->reg_base + AST2700_I2CC_SIRQ_LOG); ++ if (!i2c_bus->target) ++ ast2700_i2c_get_target(i2c_bus, sirq_log >> SLAVE_ADDR_SHIFT); ++ if (i2c_bus->target) { ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_WRITE_REQUESTED, ++ &i2c_bus->target_dma_buf[0]); ++ } ++ writel(AST2600_I2CS_SET_RX_DMA_LEN(I2C_TARGET_MSG_BUF_SIZE), ++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN); ++ cmd = TARGET_TRIGGER_CMD | AST2600_I2CS_RX_DMA_EN; ++ break; ++ case AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_RX_DONE | AST2600_I2CS_STOP: ++ sirq_log = readl(i2c_bus->reg_base + AST2700_I2CC_SIRQ_LOG); ++ /* bug workaround */ ++ if (sirq_log & SADDR_HIT) { ++ if (!i2c_bus->target) ++ ast2700_i2c_get_target(i2c_bus, sirq_log >> SLAVE_ADDR_SHIFT); ++ if (i2c_bus->target) { ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_WRITE_REQUESTED, ++ &i2c_bus->target_dma_buf[0]); ++ } ++ sirq_log = readl(i2c_bus->reg_base + AST2700_I2CC_SIRQ_LOG); ++ } ++ target_rx_len = AST2600_I2C_GET_RX_DMA_LEN(readl(i2c_bus->reg_base + ++ AST2600_I2CS_DMA_LEN_STS)); ++ for (i = 0; i < target_rx_len; i++) { ++ if (i2c_bus->target) { ++ 
i2c_slave_event(i2c_bus->target, I2C_SLAVE_WRITE_RECEIVED, ++ &i2c_bus->target_dma_buf[i]); ++ } ++ } ++ writel(AST2600_I2CS_SET_RX_DMA_LEN(I2C_TARGET_MSG_BUF_SIZE), ++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN); ++ if (i2c_bus->target) ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_STOP, &value); ++ i2c_bus->target = NULL; ++ cmd = TARGET_TRIGGER_CMD | AST2600_I2CS_RX_DMA_EN; ++ break; ++ case AST2600_I2CS_SLAVE_MATCH: ++ sirq_log = readl(i2c_bus->reg_base + AST2700_I2CC_SIRQ_LOG); ++ if (!i2c_bus->target) ++ ast2700_i2c_get_target(i2c_bus, sirq_log >> SLAVE_ADDR_SHIFT); ++ if (i2c_bus->target) { ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_WRITE_REQUESTED, ++ &i2c_bus->target_dma_buf[0]); ++ } ++ sirq_log = readl(i2c_bus->reg_base + AST2700_I2CC_SIRQ_LOG); ++ target_rx_len = AST2600_I2C_GET_RX_DMA_LEN(readl(i2c_bus->reg_base + ++ AST2600_I2CS_DMA_LEN_STS)); ++ for (i = 0; i < target_rx_len; i++) { ++ if (i2c_bus->target) { ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_WRITE_RECEIVED, ++ &i2c_bus->target_dma_buf[i]); ++ } ++ } ++ writel(AST2600_I2CS_SET_RX_DMA_LEN(I2C_TARGET_MSG_BUF_SIZE), ++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN); ++ cmd = TARGET_TRIGGER_CMD | AST2600_I2CS_RX_DMA_EN; ++ break; ++ case AST2600_I2CS_SADDR_PENDING | AST2600_I2CS_SLAVE_MATCH | ++ AST2600_I2CS_RX_DONE | AST2600_I2CS_STOP: ++ case AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_STOP: ++ sirq_log = readl(i2c_bus->reg_base + AST2700_I2CC_SIRQ_LOG); ++ if (!i2c_bus->target) ++ ast2700_i2c_get_target(i2c_bus, sirq_log >> SLAVE_ADDR_SHIFT); ++ if (i2c_bus->target) { ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_WRITE_REQUESTED, ++ &i2c_bus->target_dma_buf[0]); ++ } ++ sirq_log = readl(i2c_bus->reg_base + AST2700_I2CC_SIRQ_LOG); ++ target_rx_len = AST2600_I2C_GET_RX_DMA_LEN(readl(i2c_bus->reg_base + ++ AST2600_I2CS_DMA_LEN_STS)); ++ for (i = 0; i < target_rx_len; i++) { ++ if (i2c_bus->target) { ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_WRITE_RECEIVED, ++ &i2c_bus->target_dma_buf[i]); ++ } ++ } ++ if (i2c_bus->target) ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_STOP, &value); ++ i2c_bus->target = NULL; ++ writel(AST2600_I2CS_SET_RX_DMA_LEN(I2C_TARGET_MSG_BUF_SIZE), ++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN); ++ cmd = TARGET_TRIGGER_CMD | AST2600_I2CS_RX_DMA_EN; ++ break; ++ case AST2600_I2CS_WAIT_RX_DMA | AST2600_I2CS_SLAVE_MATCH | ++ AST2600_I2CS_RX_DONE | AST2600_I2CS_STOP: ++ sirq_log = readl(i2c_bus->reg_base + AST2700_I2CC_SIRQ_LOG); ++ /* workaround: false alarm target match check */ ++ if (sirq_log & SADDR_HIT) { ++ if (!i2c_bus->target) ++ ast2700_i2c_get_target(i2c_bus, sirq_log >> SLAVE_ADDR_SHIFT); ++ if (i2c_bus->target) { ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_WRITE_REQUESTED, ++ &i2c_bus->target_dma_buf[0]); ++ } ++ sirq_log = readl(i2c_bus->reg_base + AST2700_I2CC_SIRQ_LOG); ++ } ++ target_rx_len = AST2600_I2C_GET_RX_DMA_LEN(readl(i2c_bus->reg_base + ++ AST2600_I2CS_DMA_LEN_STS)); ++ for (i = 0; i < target_rx_len; i++) { ++ if (i2c_bus->target) { ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_WRITE_RECEIVED, ++ &i2c_bus->target_dma_buf[i]); ++ } ++ } ++ if (i2c_bus->target) ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_STOP, &value); ++ i2c_bus->target = NULL; ++ sirq_log = readl(i2c_bus->reg_base + AST2700_I2CC_SIRQ_LOG); ++ if (!i2c_bus->target) ++ ast2700_i2c_get_target(i2c_bus, sirq_log >> SLAVE_ADDR_SHIFT); ++ if (i2c_bus->target) { ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_WRITE_REQUESTED, ++ &i2c_bus->target_dma_buf[0]); ++ } ++ 
writel(AST2600_I2CS_SET_RX_DMA_LEN(I2C_TARGET_MSG_BUF_SIZE), ++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN); ++ cmd = TARGET_TRIGGER_CMD | AST2600_I2CS_RX_DMA_EN; ++ break; ++ case AST2600_I2CS_RX_DONE | AST2600_I2CS_STOP: ++ sirq_log = readl(i2c_bus->reg_base + AST2700_I2CC_SIRQ_LOG); ++ /* workaround new target match */ ++ if (sirq_log & SADDR_HIT) { ++ if (!i2c_bus->target) ++ ast2700_i2c_get_target(i2c_bus, sirq_log >> SLAVE_ADDR_SHIFT); ++ if (i2c_bus->target) { ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_WRITE_REQUESTED, ++ &i2c_bus->target_dma_buf[0]); ++ } ++ sirq_log = readl(i2c_bus->reg_base + AST2700_I2CC_SIRQ_LOG); ++ } ++ target_rx_len = AST2600_I2C_GET_RX_DMA_LEN(readl(i2c_bus->reg_base + ++ AST2600_I2CS_DMA_LEN_STS)); ++ for (i = 0; i < target_rx_len; i++) { ++ if (i2c_bus->target) { ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_WRITE_RECEIVED, ++ &i2c_bus->target_dma_buf[i]); ++ } ++ } ++ if (i2c_bus->target) ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_STOP, &value); ++ i2c_bus->target = NULL; ++ writel(AST2600_I2CS_SET_RX_DMA_LEN(I2C_TARGET_MSG_BUF_SIZE), ++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN); ++ cmd = TARGET_TRIGGER_CMD | AST2600_I2CS_RX_DMA_EN; ++ break; ++ case AST2600_I2CS_TX_NAK | AST2600_I2CS_STOP | AST2600_I2CS_SLAVE_MATCH: ++ sirq_log = readl(i2c_bus->reg_base + AST2700_I2CC_SIRQ_LOG); ++ if (i2c_bus->target) ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_STOP, &value); ++ i2c_bus->target = NULL; ++ sirq_log = readl(i2c_bus->reg_base + AST2700_I2CC_SIRQ_LOG); ++ if (sirq_log & SADDR_HIT) { ++ if (!i2c_bus->target) ++ ast2700_i2c_get_target(i2c_bus, sirq_log >> SLAVE_ADDR_SHIFT); ++ } ++ writel(AST2600_I2CS_SET_RX_DMA_LEN(I2C_TARGET_MSG_BUF_SIZE), ++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN); ++ cmd = TARGET_TRIGGER_CMD | AST2600_I2CS_RX_DMA_EN; ++ /* workaround: not clear target match due to wait next isr check tx or rx */ ++ isr &= ~AST2600_I2CS_SLAVE_MATCH; ++ break; ++ case AST2600_I2CS_TX_NAK | AST2600_I2CS_STOP: ++ sirq_log = readl(i2c_bus->reg_base + AST2700_I2CC_SIRQ_LOG); ++ if (i2c_bus->target) ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_STOP, &value); ++ i2c_bus->target = NULL; ++ sirq_log = readl(i2c_bus->reg_base + AST2700_I2CC_SIRQ_LOG); ++ writel(AST2600_I2CS_SET_RX_DMA_LEN(I2C_TARGET_MSG_BUF_SIZE), ++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN); ++ cmd = TARGET_TRIGGER_CMD | AST2600_I2CS_RX_DMA_EN; ++ break; ++ case AST2600_I2CS_WAIT_RX_DMA | AST2600_I2CS_TX_NAK | ++ AST2600_I2CS_STOP | AST2600_I2CS_SLAVE_MATCH: ++ sirq_log = readl(i2c_bus->reg_base + AST2700_I2CC_SIRQ_LOG); ++ sirq_log = readl(i2c_bus->reg_base + AST2700_I2CC_SIRQ_LOG); ++ if (i2c_bus->target) ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_STOP, &value); ++ i2c_bus->target = NULL; ++ sirq_log = readl(i2c_bus->reg_base + AST2700_I2CC_SIRQ_LOG); ++ if (!i2c_bus->target) ++ ast2700_i2c_get_target(i2c_bus, sirq_log >> SLAVE_ADDR_SHIFT); ++ if (i2c_bus->target) { ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_WRITE_REQUESTED, ++ &i2c_bus->target_dma_buf[0]); ++ } ++ writel(AST2600_I2CS_SET_RX_DMA_LEN(I2C_TARGET_MSG_BUF_SIZE), ++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN); ++ cmd = TARGET_TRIGGER_CMD | AST2600_I2CS_RX_DMA_EN; ++ break; ++ case AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_WAIT_TX_DMA: ++ sirq_log = readl(i2c_bus->reg_base + AST2700_I2CC_SIRQ_LOG); ++ if (!i2c_bus->target) ++ ast2700_i2c_get_target(i2c_bus, sirq_log >> SLAVE_ADDR_SHIFT); ++ if (i2c_bus->target) { ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_READ_REQUESTED, ++ &i2c_bus->target_dma_buf[0]); ++ } ++ 
writel(AST2600_I2CS_SET_TX_DMA_LEN(1), i2c_bus->reg_base + AST2600_I2CS_DMA_LEN); ++ cmd = TARGET_TRIGGER_CMD | AST2600_I2CS_TX_DMA_EN; ++ break; ++ case AST2600_I2CS_TX_ACK | AST2600_I2CS_WAIT_TX_DMA: ++ sirq_log = readl(i2c_bus->reg_base + AST2700_I2CC_SIRQ_LOG); ++ if (i2c_bus->target) { ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_READ_PROCESSED, ++ &i2c_bus->target_dma_buf[0]); ++ } ++ writel(AST2600_I2CS_SET_TX_DMA_LEN(1), i2c_bus->reg_base + AST2600_I2CS_DMA_LEN); ++ cmd = TARGET_TRIGGER_CMD | AST2600_I2CS_TX_DMA_EN; ++ break; ++ case AST2600_I2CS_WAIT_TX_DMA | AST2600_I2CS_SLAVE_MATCH | ++ AST2600_I2CS_RX_DONE | AST2600_I2CS_STOP: ++ sirq_log = readl(i2c_bus->reg_base + AST2700_I2CC_SIRQ_LOG); ++ target_rx_len = AST2600_I2C_GET_RX_DMA_LEN(readl(i2c_bus->reg_base + ++ AST2600_I2CS_DMA_LEN_STS)); ++ for (i = 0; i < target_rx_len; i++) { ++ if (i2c_bus->target) { ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_WRITE_RECEIVED, ++ &i2c_bus->target_dma_buf[i]); ++ } ++ } ++ if (i2c_bus->target) ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_STOP, &value); ++ i2c_bus->target = NULL; ++ sirq_log = readl(i2c_bus->reg_base + AST2700_I2CC_SIRQ_LOG); ++ if (!i2c_bus->target) ++ ast2700_i2c_get_target(i2c_bus, sirq_log >> SLAVE_ADDR_SHIFT); ++ if (i2c_bus->target) { ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_READ_REQUESTED, ++ &i2c_bus->target_dma_buf[0]); ++ } ++ writel(AST2600_I2CS_SET_TX_DMA_LEN(1), ++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN); ++ cmd = TARGET_TRIGGER_CMD | AST2600_I2CS_TX_DMA_EN; ++ break; ++ case AST2600_I2CS_WAIT_TX_DMA | AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_RX_DONE: ++ sirq_log = readl(i2c_bus->reg_base + AST2700_I2CC_SIRQ_LOG); ++ target_rx_len = AST2600_I2C_GET_RX_DMA_LEN(readl(i2c_bus->reg_base + ++ AST2600_I2CS_DMA_LEN_STS)); ++ for (i = 0; i < target_rx_len; i++) { ++ if (i2c_bus->target) { ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_WRITE_RECEIVED, ++ &i2c_bus->target_dma_buf[i]); ++ } ++ } ++ sirq_log = readl(i2c_bus->reg_base + AST2700_I2CC_SIRQ_LOG); ++ if (!i2c_bus->target) ++ ast2700_i2c_get_target(i2c_bus, sirq_log >> SLAVE_ADDR_SHIFT); ++ if (i2c_bus->target) { ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_READ_REQUESTED, ++ &i2c_bus->target_dma_buf[0]); ++ } ++ writel(AST2600_I2CS_SET_TX_DMA_LEN(1), i2c_bus->reg_base + AST2600_I2CS_DMA_LEN); ++ cmd = TARGET_TRIGGER_CMD | AST2600_I2CS_TX_DMA_EN; ++ break; ++ case AST2600_I2CS_SADDR_PENDING | AST2600_I2CS_WAIT_TX_DMA | ++ AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_RX_DONE: ++ writel(AST2600_I2CS_SLAVE_MATCH, i2c_bus->reg_base + AST2600_I2CS_ISR); ++ isr = readl(i2c_bus->reg_base + AST2600_I2CS_ISR); ++ sirq_log = readl(i2c_bus->reg_base + AST2700_I2CC_SIRQ_LOG); ++ if (!i2c_bus->target) ++ ast2700_i2c_get_target(i2c_bus, sirq_log >> SLAVE_ADDR_SHIFT); ++ if (i2c_bus->target) { ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_WRITE_REQUESTED, ++ &i2c_bus->target_dma_buf[0]); ++ } ++ sirq_log = readl(i2c_bus->reg_base + AST2700_I2CC_SIRQ_LOG); ++ target_rx_len = AST2600_I2C_GET_RX_DMA_LEN(readl(i2c_bus->reg_base + ++ AST2600_I2CS_DMA_LEN_STS)); ++ for (i = 0; i < target_rx_len; i++) { ++ if (i2c_bus->target) { ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_WRITE_RECEIVED, ++ &i2c_bus->target_dma_buf[i]); ++ } ++ } ++ sirq_log = readl(i2c_bus->reg_base + AST2700_I2CC_SIRQ_LOG); ++ if (!i2c_bus->target) ++ ast2700_i2c_get_target(i2c_bus, sirq_log >> SLAVE_ADDR_SHIFT); ++ if (i2c_bus->target) { ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_READ_REQUESTED, ++ &i2c_bus->target_dma_buf[0]); ++ } ++ 
writel(AST2600_I2CS_SET_TX_DMA_LEN(1), i2c_bus->reg_base + AST2600_I2CS_DMA_LEN); ++ cmd = TARGET_TRIGGER_CMD | AST2600_I2CS_TX_DMA_EN; ++ break; ++ case AST2600_I2CS_SADDR_PENDING | AST2600_I2CS_WAIT_TX_DMA | ++ AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_RX_DONE | AST2600_I2CS_STOP: ++ writel(AST2600_I2CS_SLAVE_MATCH, i2c_bus->reg_base + AST2600_I2CS_ISR); ++ isr = readl(i2c_bus->reg_base + AST2600_I2CS_ISR); ++ sirq_log = readl(i2c_bus->reg_base + AST2700_I2CC_SIRQ_LOG); ++ if (!i2c_bus->target) ++ ast2700_i2c_get_target(i2c_bus, sirq_log >> SLAVE_ADDR_SHIFT); ++ if (i2c_bus->target) { ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_WRITE_REQUESTED, ++ &i2c_bus->target_dma_buf[0]); ++ } ++ sirq_log = readl(i2c_bus->reg_base + AST2700_I2CC_SIRQ_LOG); ++ target_rx_len = AST2600_I2C_GET_RX_DMA_LEN(readl(i2c_bus->reg_base + ++ AST2600_I2CS_DMA_LEN_STS)); ++ for (i = 0; i < target_rx_len; i++) { ++ if (i2c_bus->target) { ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_WRITE_RECEIVED, ++ &i2c_bus->target_dma_buf[i]); ++ } ++ } ++ if (i2c_bus->target) ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_STOP, &value); ++ sirq_log = readl(i2c_bus->reg_base + AST2700_I2CC_SIRQ_LOG); ++ if (!i2c_bus->target) ++ ast2700_i2c_get_target(i2c_bus, sirq_log >> SLAVE_ADDR_SHIFT); ++ if (i2c_bus->target) { ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_READ_REQUESTED, ++ &i2c_bus->target_dma_buf[0]); ++ } ++ writel(AST2600_I2CS_SET_TX_DMA_LEN(1), i2c_bus->reg_base + AST2600_I2CS_DMA_LEN); ++ cmd = TARGET_TRIGGER_CMD | AST2600_I2CS_TX_DMA_EN; ++ break; ++ case AST2600_I2CS_WAIT_TX_DMA: ++ sirq_log = readl(i2c_bus->reg_base + AST2700_I2CC_SIRQ_LOG); ++ if (!i2c_bus->target) ++ ast2700_i2c_get_target(i2c_bus, sirq_log >> SLAVE_ADDR_SHIFT); ++ if (i2c_bus->target) { ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_READ_REQUESTED, ++ &i2c_bus->target_dma_buf[0]); ++ } ++ writel(AST2600_I2CS_SET_TX_DMA_LEN(1), i2c_bus->reg_base + AST2600_I2CS_DMA_LEN); ++ cmd = TARGET_TRIGGER_CMD | AST2600_I2CS_TX_DMA_EN; ++ break; ++ default: ++ dev_dbg(i2c_bus->dev, "unhandled target isr case %x, sts %x\n", sts, ++ readl(i2c_bus->reg_base + AST2600_I2CC_STS_AND_BUFF)); ++ ++ /* clear sirq log */ ++ while (readl(i2c_bus->reg_base + AST2700_I2CC_SIRQ_LOG)) ++ ; ++ break; ++ } ++ ++ if (cmd) ++ writel(cmd, i2c_bus->reg_base + AST2600_I2CS_CMD_STS); ++ ++ writel(isr, i2c_bus->reg_base + AST2600_I2CS_ISR); ++ readl(i2c_bus->reg_base + AST2600_I2CS_ISR); ++} ++ ++static void ast2600_i2c_target_packet_dma_irq(struct ast2600_i2c_bus *i2c_bus, u32 sts) ++{ ++ int target_rx_len = 0; ++ u32 cmd = 0; ++ u8 value; ++ int i; ++ ++ sts &= ~(AST2600_I2CS_SLAVE_PENDING | AST2600_I2CS_SADDR_PENDING ++ | AST2600_I2CS_ADDR_NAK_MASK); ++ ++ i2c_bus->target = i2c_bus->multi_target[AST2600_I2CS_GET_TARGET(sts)]; ++ ++ /* Handle i2c target timeout condition */ ++ if (AST2600_I2CS_INACTIVE_TO & sts) { ++ /* Reset time out counter */ ++ u32 ac_timing = readl(i2c_bus->reg_base + AST2600_I2CC_AC_TIMING) & ++ AST2600_I2CC_AC_TIMING_MASK; ++ ++ writel(ac_timing, i2c_bus->reg_base + AST2600_I2CC_AC_TIMING); ++ ac_timing |= AST2600_I2CC_TTIMEOUT(i2c_bus->timeout); ++ writel(ac_timing, i2c_bus->reg_base + AST2600_I2CC_AC_TIMING); ++ /* set rx dma length ,re-send target trigger command and clear irq status */ ++ writel(AST2600_I2CS_SET_RX_DMA_LEN(I2C_TARGET_MSG_BUF_SIZE), ++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN); ++ writel(TARGET_TRIGGER_CMD | AST2600_I2CS_RX_DMA_EN, ++ i2c_bus->reg_base + AST2600_I2CS_CMD_STS); ++ writel(AST2600_I2CS_PKT_DONE, i2c_bus->reg_base + 
AST2600_I2CS_ISR); ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_STOP, &value); ++ return; ++ } ++ ++ sts &= ~(AST2600_I2CS_PKT_DONE | AST2600_I2CS_PKT_ERROR | AST2600_I2CS_ADDR_INDICATE_MASK); ++ ++ switch (sts) { ++ case AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_RX_DONE | AST2600_I2CS_WAIT_RX_DMA: ++ case AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_WAIT_RX_DMA: ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_WRITE_REQUESTED, &value); ++ target_rx_len = AST2600_I2C_GET_RX_DMA_LEN(readl(i2c_bus->reg_base + ++ AST2600_I2CS_DMA_LEN_STS)); ++ for (i = 0; i < target_rx_len; i++) { ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_WRITE_RECEIVED, ++ &i2c_bus->target_dma_buf[i]); ++ } ++ writel(AST2600_I2CS_SET_RX_DMA_LEN(I2C_TARGET_MSG_BUF_SIZE), ++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN); ++ cmd = TARGET_TRIGGER_CMD | AST2600_I2CS_RX_DMA_EN; ++ break; ++ case AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_STOP: ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_STOP, &value); ++ writel(AST2600_I2CS_SET_RX_DMA_LEN(I2C_TARGET_MSG_BUF_SIZE), ++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN); ++ cmd = TARGET_TRIGGER_CMD | AST2600_I2CS_RX_DMA_EN; ++ break; ++ case AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_RX_DONE_NAK | ++ AST2600_I2CS_RX_DONE | AST2600_I2CS_STOP: ++ case AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_WAIT_RX_DMA | ++ AST2600_I2CS_RX_DONE | AST2600_I2CS_STOP: ++ case AST2600_I2CS_RX_DONE_NAK | AST2600_I2CS_RX_DONE | AST2600_I2CS_STOP: ++ case AST2600_I2CS_RX_DONE | AST2600_I2CS_WAIT_RX_DMA | AST2600_I2CS_STOP: ++ case AST2600_I2CS_RX_DONE | AST2600_I2CS_STOP: ++ case AST2600_I2CS_RX_DONE | AST2600_I2CS_WAIT_RX_DMA: ++ case AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_RX_DONE | AST2600_I2CS_STOP: ++ if (sts & AST2600_I2CS_SLAVE_MATCH) ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_WRITE_REQUESTED, &value); ++ ++ target_rx_len = AST2600_I2C_GET_RX_DMA_LEN(readl(i2c_bus->reg_base + ++ AST2600_I2CS_DMA_LEN_STS)); ++ for (i = 0; i < target_rx_len; i++) { ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_WRITE_RECEIVED, ++ &i2c_bus->target_dma_buf[i]); ++ } ++ writel(AST2600_I2CS_SET_RX_DMA_LEN(I2C_TARGET_MSG_BUF_SIZE), ++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN); ++ if (sts & AST2600_I2CS_STOP) ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_STOP, &value); ++ cmd = TARGET_TRIGGER_CMD | AST2600_I2CS_RX_DMA_EN; ++ break; ++ ++ /* it is Mw data Mr coming -> it need send tx */ ++ case AST2600_I2CS_RX_DONE | AST2600_I2CS_WAIT_TX_DMA: ++ case AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_RX_DONE | AST2600_I2CS_WAIT_TX_DMA: ++ /* it should be repeat start read */ ++ if (sts & AST2600_I2CS_SLAVE_MATCH) ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_WRITE_REQUESTED, &value); ++ ++ target_rx_len = AST2600_I2C_GET_RX_DMA_LEN(readl(i2c_bus->reg_base + ++ AST2600_I2CS_DMA_LEN_STS)); ++ for (i = 0; i < target_rx_len; i++) { ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_WRITE_RECEIVED, ++ &i2c_bus->target_dma_buf[i]); ++ } ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_READ_REQUESTED, ++ &i2c_bus->target_dma_buf[0]); ++ writel(AST2600_I2CS_SET_TX_DMA_LEN(1), ++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN); ++ cmd = TARGET_TRIGGER_CMD | AST2600_I2CS_TX_DMA_EN; ++ break; ++ case AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_WAIT_TX_DMA: ++ /* First Start read */ ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_READ_REQUESTED, ++ &i2c_bus->target_dma_buf[0]); ++ writel(AST2600_I2CS_SET_TX_DMA_LEN(1), ++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN); ++ cmd = TARGET_TRIGGER_CMD | AST2600_I2CS_TX_DMA_EN; ++ break; ++ case AST2600_I2CS_WAIT_TX_DMA: ++ /* it should be next start 
read */ ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_READ_PROCESSED, ++ &i2c_bus->target_dma_buf[0]); ++ writel(AST2600_I2CS_SET_TX_DMA_LEN(1), ++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN); ++ cmd = TARGET_TRIGGER_CMD | AST2600_I2CS_TX_DMA_EN; ++ break; ++ case AST2600_I2CS_TX_NAK | AST2600_I2CS_STOP | AST2600_I2CS_SLAVE_MATCH: ++ case AST2600_I2CS_TX_NAK | AST2600_I2CS_STOP: ++ /* it just tx complete */ ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_STOP, &value); ++ writel(AST2600_I2CS_SET_RX_DMA_LEN(I2C_TARGET_MSG_BUF_SIZE), ++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN); ++ cmd = TARGET_TRIGGER_CMD | AST2600_I2CS_RX_DMA_EN; ++ break; ++ case AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_RX_DONE: ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_WRITE_REQUESTED, &value); ++ break; ++ case AST2600_I2CS_STOP: ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_STOP, &value); ++ break; ++ default: ++ dev_dbg(i2c_bus->dev, "unhandled target isr case %x, sts %x\n", sts, ++ readl(i2c_bus->reg_base + AST2600_I2CC_STS_AND_BUFF)); ++ break; ++ } ++ ++ if (cmd) ++ writel(cmd, i2c_bus->reg_base + AST2600_I2CS_CMD_STS); ++ ++ writel(AST2600_I2CS_PKT_DONE, i2c_bus->reg_base + AST2600_I2CS_ISR); ++ readl(i2c_bus->reg_base + AST2600_I2CS_ISR); ++} ++ ++static void ast2600_i2c_target_packet_buff_irq(struct ast2600_i2c_bus *i2c_bus, u32 sts) ++{ ++ int target_rx_len = 0; ++ u32 cmd = 0; ++ u8 value; ++ int i; ++ ++ /* due to controller target share same buffer, so need force the master stop not issue */ ++ if (readl(i2c_bus->reg_base + AST2600_I2CM_CMD_STS) & GENMASK(15, 0)) { ++ writel(0, i2c_bus->reg_base + AST2600_I2CM_CMD_STS); ++ i2c_bus->cmd_err = -EBUSY; ++ writel(0, i2c_bus->reg_base + AST2600_I2CC_BUFF_CTRL); ++ complete(&i2c_bus->cmd_complete); ++ } ++ ++ i2c_bus->target = i2c_bus->multi_target[AST2600_I2CS_GET_TARGET(sts)]; ++ ++ /* Handle i2c target timeout condition */ ++ if (AST2600_I2CS_INACTIVE_TO & sts) { ++ /* Reset time out counter */ ++ u32 ac_timing = readl(i2c_bus->reg_base + AST2600_I2CC_AC_TIMING) & ++ AST2600_I2CC_AC_TIMING_MASK; ++ ++ writel(ac_timing, i2c_bus->reg_base + AST2600_I2CC_AC_TIMING); ++ ac_timing |= AST2600_I2CC_TTIMEOUT(i2c_bus->timeout); ++ writel(ac_timing, i2c_bus->reg_base + AST2600_I2CC_AC_TIMING); ++ /* Re-send target trigger command and clear irq */ ++ writel(TARGET_TRIGGER_CMD, i2c_bus->reg_base + AST2600_I2CS_CMD_STS); ++ writel(AST2600_I2CS_PKT_DONE, i2c_bus->reg_base + AST2600_I2CS_ISR); ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_STOP, &value); ++ i2c_bus->target_operate = 0; ++ return; ++ } ++ ++ sts &= ~(AST2600_I2CS_PKT_DONE | AST2600_I2CS_PKT_ERROR | AST2600_I2CS_ADDR_INDICATE_MASK); ++ ++ if (sts & AST2600_I2CS_SLAVE_MATCH) ++ i2c_bus->target_operate = 1; ++ ++ switch (sts) { ++ case AST2600_I2CS_SLAVE_PENDING | AST2600_I2CS_WAIT_RX_DMA | ++ AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_RX_DONE | AST2600_I2CS_STOP: ++ case AST2600_I2CS_SLAVE_PENDING | ++ AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_RX_DONE | AST2600_I2CS_STOP: ++ case AST2600_I2CS_SLAVE_PENDING | ++ AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_STOP: ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_STOP, &value); ++ fallthrough; ++ case AST2600_I2CS_SLAVE_PENDING | ++ AST2600_I2CS_WAIT_RX_DMA | AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_RX_DONE: ++ case AST2600_I2CS_WAIT_RX_DMA | AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_RX_DONE: ++ case AST2600_I2CS_WAIT_RX_DMA | AST2600_I2CS_SLAVE_MATCH: ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_WRITE_REQUESTED, &value); ++ cmd = TARGET_TRIGGER_CMD; ++ if (sts & 
AST2600_I2CS_RX_DONE) { ++ target_rx_len = AST2600_I2CC_GET_RX_BUF_LEN(readl(i2c_bus->reg_base + ++ AST2600_I2CC_BUFF_CTRL)); ++ for (i = 0; i < target_rx_len; i++) { ++ value = readb(i2c_bus->buf_base + 0x10 + i); ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_WRITE_RECEIVED, &value); ++ } ++ } ++ if (readl(i2c_bus->reg_base + AST2600_I2CS_CMD_STS) & AST2600_I2CS_RX_BUFF_EN) ++ cmd = 0; ++ else ++ cmd = TARGET_TRIGGER_CMD | AST2600_I2CS_RX_BUFF_EN; ++ ++ writel(AST2600_I2CC_SET_RX_BUF_LEN(i2c_bus->buf_size), ++ i2c_bus->reg_base + AST2600_I2CC_BUFF_CTRL); ++ break; ++ case AST2600_I2CS_WAIT_RX_DMA | AST2600_I2CS_RX_DONE: ++ cmd = TARGET_TRIGGER_CMD; ++ target_rx_len = AST2600_I2CC_GET_RX_BUF_LEN(readl(i2c_bus->reg_base + ++ AST2600_I2CC_BUFF_CTRL)); ++ for (i = 0; i < target_rx_len; i++) { ++ value = readb(i2c_bus->buf_base + 0x10 + i); ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_WRITE_RECEIVED, &value); ++ } ++ cmd |= AST2600_I2CS_RX_BUFF_EN; ++ writel(AST2600_I2CC_SET_RX_BUF_LEN(i2c_bus->buf_size), ++ i2c_bus->reg_base + AST2600_I2CC_BUFF_CTRL); ++ break; ++ case AST2600_I2CS_SLAVE_PENDING | AST2600_I2CS_WAIT_RX_DMA | ++ AST2600_I2CS_RX_DONE | AST2600_I2CS_STOP: ++ cmd = TARGET_TRIGGER_CMD; ++ target_rx_len = AST2600_I2CC_GET_RX_BUF_LEN(readl(i2c_bus->reg_base + ++ AST2600_I2CC_BUFF_CTRL)); ++ for (i = 0; i < target_rx_len; i++) { ++ value = readb(i2c_bus->buf_base + 0x10 + i); ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_WRITE_RECEIVED, &value); ++ } ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_STOP, &value); ++ cmd |= AST2600_I2CS_RX_BUFF_EN; ++ writel(AST2600_I2CC_SET_RX_BUF_LEN(i2c_bus->buf_size), ++ i2c_bus->reg_base + AST2600_I2CC_BUFF_CTRL); ++ break; ++ case AST2600_I2CS_SLAVE_PENDING | AST2600_I2CS_RX_DONE | AST2600_I2CS_STOP: ++ cmd = TARGET_TRIGGER_CMD; ++ target_rx_len = AST2600_I2CC_GET_RX_BUF_LEN(readl(i2c_bus->reg_base + ++ AST2600_I2CC_BUFF_CTRL)); ++ for (i = 0; i < target_rx_len; i++) { ++ value = readb(i2c_bus->buf_base + 0x10 + i); ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_WRITE_RECEIVED, &value); ++ } ++ /* workaround for avoid next start with len != 0 */ ++ writel(BIT(0), i2c_bus->reg_base + AST2600_I2CC_BUFF_CTRL); ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_STOP, &value); ++ break; ++ case AST2600_I2CS_RX_DONE | AST2600_I2CS_STOP: ++ cmd = TARGET_TRIGGER_CMD; ++ target_rx_len = AST2600_I2CC_GET_RX_BUF_LEN(readl(i2c_bus->reg_base + ++ AST2600_I2CC_BUFF_CTRL)); ++ for (i = 0; i < target_rx_len; i++) { ++ value = readb(i2c_bus->buf_base + 0x10 + i); ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_WRITE_RECEIVED, &value); ++ } ++ /* workaround for avoid next start with len != 0 */ ++ writel(BIT(0), i2c_bus->reg_base + AST2600_I2CC_BUFF_CTRL); ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_STOP, &value); ++ break; ++ case AST2600_I2CS_SLAVE_PENDING | AST2600_I2CS_RX_DONE | ++ AST2600_I2CS_WAIT_TX_DMA | AST2600_I2CS_STOP: ++ target_rx_len = AST2600_I2CC_GET_RX_BUF_LEN(readl(i2c_bus->reg_base + ++ AST2600_I2CC_BUFF_CTRL)); ++ for (i = 0; i < target_rx_len; i++) { ++ value = readb(i2c_bus->buf_base + 0x10 + i); ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_WRITE_RECEIVED, &value); ++ } ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_STOP, &value); ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_READ_REQUESTED, &value); ++ writeb(value, i2c_bus->buf_base); ++ break; ++ case AST2600_I2CS_WAIT_TX_DMA | AST2600_I2CS_SLAVE_MATCH: ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_READ_REQUESTED, &value); ++ writeb(value, i2c_bus->buf_base); ++ 
writel(AST2600_I2CC_SET_TX_BUF_LEN(1), ++ i2c_bus->reg_base + AST2600_I2CC_BUFF_CTRL); ++ cmd = TARGET_TRIGGER_CMD | AST2600_I2CS_TX_BUFF_EN; ++ break; ++ case AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_WAIT_TX_DMA | AST2600_I2CS_RX_DONE: ++ case AST2600_I2CS_WAIT_TX_DMA | AST2600_I2CS_RX_DONE: ++ case AST2600_I2CS_WAIT_TX_DMA: ++ /* it should be repeat start read */ ++ if (sts & AST2600_I2CS_SLAVE_MATCH) ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_WRITE_REQUESTED, &value); ++ ++ if (sts & AST2600_I2CS_RX_DONE) { ++ target_rx_len = AST2600_I2CC_GET_RX_BUF_LEN(readl(i2c_bus->reg_base + ++ AST2600_I2CC_BUFF_CTRL)); ++ for (i = 0; i < target_rx_len; i++) { ++ value = readb(i2c_bus->buf_base + 0x10 + i); ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_WRITE_RECEIVED, &value); ++ } ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_READ_REQUESTED, &value); ++ } else { ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_READ_PROCESSED, &value); ++ } ++ writeb(value, i2c_bus->buf_base); ++ writel(AST2600_I2CC_SET_TX_BUF_LEN(1), ++ i2c_bus->reg_base + AST2600_I2CC_BUFF_CTRL); ++ cmd = TARGET_TRIGGER_CMD | AST2600_I2CS_TX_BUFF_EN; ++ break; ++ /* workaround : trigger the cmd twice to fix next state keep 1000000 */ ++ case AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_RX_DONE: ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_WRITE_REQUESTED, &value); ++ cmd = TARGET_TRIGGER_CMD | AST2600_I2CS_RX_BUFF_EN; ++ writel(cmd, i2c_bus->reg_base + AST2600_I2CS_CMD_STS); ++ break; ++ /* the pending slave needs to be cleared with TX_NAK and STOP here */ ++ /* other flags will be handled in the next irq callback */ ++ /* the slave index will be updated when the slave match occurs */ ++ /* use the pervious idx to do the slave stop event */ ++ case AST2600_I2CS_SLAVE_PENDING | AST2600_I2CS_TX_NAK | AST2600_I2CS_STOP | ++ AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_RX_DONE: ++ case AST2600_I2CS_SLAVE_PENDING | AST2600_I2CS_TX_NAK | AST2600_I2CS_STOP | ++ AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_RX_DONE | AST2600_I2CS_WAIT_RX_DMA: ++ cmd = TARGET_TRIGGER_CMD; ++ i2c_bus->target = i2c_bus->multi_target[i2c_bus->previous_idx]; ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_STOP, &value); ++ i2c_bus->target_operate = 0; ++ break; ++ case AST2600_I2CS_TX_NAK | AST2600_I2CS_STOP: ++ case AST2600_I2CS_STOP: ++ cmd = TARGET_TRIGGER_CMD; ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_STOP, &value); ++ break; ++ default: ++ dev_dbg(i2c_bus->dev, "unhandled target isr case %x, sts %x\n", sts, ++ readl(i2c_bus->reg_base + AST2600_I2CC_STS_AND_BUFF)); ++ break; ++ } ++ ++ if (cmd) ++ writel(cmd, i2c_bus->reg_base + AST2600_I2CS_CMD_STS); ++ writel(AST2600_I2CS_PKT_DONE, i2c_bus->reg_base + AST2600_I2CS_ISR); ++ readl(i2c_bus->reg_base + AST2600_I2CS_ISR); ++ ++ if ((sts & AST2600_I2CS_STOP) && !(sts & AST2600_I2CS_SLAVE_PENDING)) ++ i2c_bus->target_operate = 0; ++ else ++ i2c_bus->previous_idx = AST2600_I2CS_GET_TARGET(sts); ++} ++ ++static void ast2600_i2c_target_byte_irq(struct ast2600_i2c_bus *i2c_bus, u32 sts) ++{ ++ u32 i2c_buff = readl(i2c_bus->reg_base + AST2600_I2CC_STS_AND_BUFF); ++ u32 cmd = AST2600_I2CS_ACTIVE_ALL; ++ u8 byte_data; ++ u8 value; ++ ++ i2c_bus->target = i2c_bus->multi_target[AST2600_I2CS_GET_TARGET(sts)]; ++ ++ /* Handle i2c target timeout condition */ ++ if (AST2600_I2CS_INACTIVE_TO & sts) { ++ /* Reset time out counter */ ++ u32 ac_timing = readl(i2c_bus->reg_base + AST2600_I2CC_AC_TIMING) & ++ AST2600_I2CC_AC_TIMING_MASK; ++ ++ writel(ac_timing, i2c_bus->reg_base + AST2600_I2CC_AC_TIMING); ++ ac_timing |= 
AST2600_I2CC_TTIMEOUT(i2c_bus->timeout); ++ writel(ac_timing, i2c_bus->reg_base + AST2600_I2CC_AC_TIMING); ++ /* Re-send target trigger command and clear irq */ ++ writel(AST2600_I2CS_ACTIVE_ALL, i2c_bus->reg_base + AST2600_I2CS_CMD_STS); ++ writel(sts, i2c_bus->reg_base + AST2600_I2CS_ISR); ++ readl(i2c_bus->reg_base + AST2600_I2CS_ISR); ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_STOP, &value); ++ return; ++ } ++ ++ sts &= ~(AST2600_I2CS_ADDR_INDICATE_MASK); ++ ++ switch (sts) { ++ case AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_RX_DONE | AST2600_I2CS_WAIT_RX_DMA: ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_WRITE_REQUESTED, &value); ++ /* first address match is address */ ++ byte_data = AST2600_I2CC_GET_RX_BUFF(i2c_buff); ++ break; ++ case AST2600_I2CS_RX_DONE | AST2600_I2CS_WAIT_RX_DMA: ++ byte_data = AST2600_I2CC_GET_RX_BUFF(i2c_buff); ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_WRITE_RECEIVED, &byte_data); ++ break; ++ case AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_RX_DONE | AST2600_I2CS_WAIT_TX_DMA: ++ cmd |= AST2600_I2CS_TX_CMD; ++ byte_data = AST2600_I2CC_GET_RX_BUFF(i2c_buff); ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_READ_REQUESTED, &byte_data); ++ writel(byte_data, i2c_bus->reg_base + AST2600_I2CC_STS_AND_BUFF); ++ break; ++ case AST2600_I2CS_TX_ACK | AST2600_I2CS_WAIT_TX_DMA: ++ cmd |= AST2600_I2CS_TX_CMD; ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_READ_PROCESSED, &byte_data); ++ writel(byte_data, i2c_bus->reg_base + AST2600_I2CC_STS_AND_BUFF); ++ break; ++ case AST2600_I2CS_STOP: ++ case AST2600_I2CS_STOP | AST2600_I2CS_TX_NAK: ++ i2c_slave_event(i2c_bus->target, I2C_SLAVE_STOP, &value); ++ break; ++ default: ++ dev_dbg(i2c_bus->dev, "unhandled pkt isr %x\n", sts); ++ break; ++ } ++ writel(cmd, i2c_bus->reg_base + AST2600_I2CS_CMD_STS); ++ writel(sts, i2c_bus->reg_base + AST2600_I2CS_ISR); ++ readl(i2c_bus->reg_base + AST2600_I2CS_ISR); ++} ++ ++static int ast2600_i2c_target_irq(struct ast2600_i2c_bus *i2c_bus) ++{ ++ u32 ier = readl(i2c_bus->reg_base + AST2600_I2CS_IER); ++ u32 isr = readl(i2c_bus->reg_base + AST2600_I2CS_ISR); ++ ++ if (!(isr & ier)) ++ return 0; ++ ++ /* ++ * Target interrupt coming after Controller package done ++ * So need handle controller first. 
++ */ ++ if (readl(i2c_bus->reg_base + AST2600_I2CM_ISR) & AST2600_I2CM_PKT_DONE) ++ return 0; ++ ++ isr &= ~(AST2600_I2CS_ADDR_MASK); ++ ++ if (AST2600_I2CS_PKT_DONE & isr) { ++ if (i2c_bus->mode == DMA_MODE) { ++ if (i2c_bus->version == AST2700) ++ ast2700_i2c_target_packet_dma_irq(i2c_bus, isr); ++ else ++ ast2600_i2c_target_packet_dma_irq(i2c_bus, isr); ++ } else { ++ ast2600_i2c_target_packet_buff_irq(i2c_bus, isr); ++ } ++ } else { ++ ast2600_i2c_target_byte_irq(i2c_bus, isr); ++ } ++ ++ return 1; ++} ++#endif ++ ++static int ast2600_i2c_setup_dma_tx(u32 cmd, struct ast2600_i2c_bus *i2c_bus) ++{ ++ struct i2c_msg *msg = &i2c_bus->msgs[i2c_bus->msgs_index]; ++ int xfer_len = msg->len - i2c_bus->controller_xfer_cnt; ++ int ret; ++ ++ cmd |= AST2600_I2CM_PKT_EN; ++ ++ if (xfer_len > AST2600_I2C_DMA_SIZE) ++ xfer_len = AST2600_I2C_DMA_SIZE; ++ else if (i2c_bus->msgs_index + 1 == i2c_bus->msgs_count) ++ cmd |= AST2600_I2CM_STOP_CMD; ++ ++ if (cmd & AST2600_I2CM_START_CMD) { ++ cmd |= AST2600_I2CM_PKT_ADDR(msg->addr); ++ if (xfer_len) { ++ i2c_bus->controller_safe_buf = i2c_get_dma_safe_msg_buf(msg, 1); ++ if (!i2c_bus->controller_safe_buf) ++ return -ENOMEM; ++ i2c_bus->controller_dma_addr = ++ dma_map_single(i2c_bus->dev, i2c_bus->controller_safe_buf, ++ msg->len, DMA_TO_DEVICE); ++ ret = dma_mapping_error(i2c_bus->dev, i2c_bus->controller_dma_addr); ++ if (ret) { ++ i2c_put_dma_safe_msg_buf(i2c_bus->controller_safe_buf, msg, false); ++ i2c_bus->controller_safe_buf = NULL; ++ return ret; ++ } ++ } ++ } ++ ++ if (xfer_len) { ++ cmd |= AST2600_I2CM_TX_DMA_EN | AST2600_I2CM_TX_CMD; ++ writel(AST2600_I2CM_SET_TX_DMA_LEN(xfer_len - 1), ++ i2c_bus->reg_base + AST2600_I2CM_DMA_LEN); ++ writel(lower_32_bits(i2c_bus->controller_dma_addr), ++ i2c_bus->reg_base + AST2600_I2CM_TX_DMA); ++ writel(upper_32_bits(i2c_bus->controller_dma_addr), ++ i2c_bus->reg_base + AST2600_I2CM_TX_DMA_H); ++ } ++ ++ writel(cmd, i2c_bus->reg_base + AST2600_I2CM_CMD_STS); ++ ++ return 0; ++} ++ ++static int ast2600_i2c_setup_buff_tx(u32 cmd, struct ast2600_i2c_bus *i2c_bus) ++{ ++ struct i2c_msg *msg = &i2c_bus->msgs[i2c_bus->msgs_index]; ++ int xfer_len = msg->len - i2c_bus->controller_xfer_cnt; ++ u32 wbuf_dword; ++ int i; ++ ++ cmd |= AST2600_I2CM_PKT_EN; ++ ++ if (xfer_len > i2c_bus->buf_size) ++ xfer_len = i2c_bus->buf_size; ++ else if (i2c_bus->msgs_index + 1 == i2c_bus->msgs_count) ++ cmd |= AST2600_I2CM_STOP_CMD; ++ ++ if (cmd & AST2600_I2CM_START_CMD) ++ cmd |= AST2600_I2CM_PKT_ADDR(msg->addr); ++ ++ if (xfer_len) { ++ cmd |= AST2600_I2CM_TX_BUFF_EN | AST2600_I2CM_TX_CMD; ++ /* ++ * The controller's buffer register supports dword writes only. ++ * Therefore, write dwords to the buffer register in a 4-byte aligned, ++ * and write the remaining unaligned data at the end. 
++ */ ++ if (readl(i2c_bus->reg_base + AST2600_I2CS_ISR)) ++ return -ENOMEM; ++ for (i = 0; i < xfer_len; i += 4) { ++ int xfer_cnt = i2c_bus->controller_xfer_cnt + i; ++ ++ switch (min(xfer_len - i, 4) % 4) { ++ case 1: ++ wbuf_dword = msg->buf[xfer_cnt]; ++ break; ++ case 2: ++ wbuf_dword = get_unaligned_le16(&msg->buf[xfer_cnt]); ++ break; ++ case 3: ++ wbuf_dword = get_unaligned_le24(&msg->buf[xfer_cnt]); ++ break; ++ default: ++ wbuf_dword = get_unaligned_le32(&msg->buf[xfer_cnt]); ++ break; ++ } ++ writel(wbuf_dword, i2c_bus->buf_base + i); ++ } ++ if (readl(i2c_bus->reg_base + AST2600_I2CS_ISR)) ++ return -ENOMEM; ++ writel(AST2600_I2CC_SET_TX_BUF_LEN(xfer_len), ++ i2c_bus->reg_base + AST2600_I2CC_BUFF_CTRL); ++ } ++ ++ if (readl(i2c_bus->reg_base + AST2600_I2CS_ISR)) ++ return -ENOMEM; ++ ++ writel(cmd, i2c_bus->reg_base + AST2600_I2CM_CMD_STS); ++ ++ return 0; ++} ++ ++static int ast2600_i2c_setup_byte_tx(u32 cmd, struct ast2600_i2c_bus *i2c_bus) ++{ ++ struct i2c_msg *msg = &i2c_bus->msgs[i2c_bus->msgs_index]; ++ int xfer_len; ++ ++ xfer_len = msg->len - i2c_bus->controller_xfer_cnt; ++ ++ cmd |= AST2600_I2CM_PKT_EN; ++ ++ if (cmd & AST2600_I2CM_START_CMD) ++ cmd |= AST2600_I2CM_PKT_ADDR(msg->addr); ++ ++ if ((i2c_bus->msgs_index + 1 == i2c_bus->msgs_count) && ++ xfer_len == 1) ++ cmd |= AST2600_I2CM_STOP_CMD; ++ ++ if (xfer_len) { ++ cmd |= AST2600_I2CM_TX_CMD; ++ writel(msg->buf[i2c_bus->controller_xfer_cnt], ++ i2c_bus->reg_base + AST2600_I2CC_STS_AND_BUFF); ++ } ++ ++ writel(cmd, i2c_bus->reg_base + AST2600_I2CM_CMD_STS); ++ ++ return 0; ++} ++ ++static int ast2600_i2c_setup_dma_rx(u32 cmd, struct ast2600_i2c_bus *i2c_bus) ++{ ++ struct i2c_msg *msg = &i2c_bus->msgs[i2c_bus->msgs_index]; ++ int xfer_len = msg->len - i2c_bus->controller_xfer_cnt; ++ int ret; ++ ++ cmd |= AST2600_I2CM_PKT_EN | AST2600_I2CM_RX_DMA_EN | AST2600_I2CM_RX_CMD; ++ ++ if (msg->flags & I2C_M_RECV_LEN) { ++ dev_dbg(i2c_bus->dev, "smbus read\n"); ++ xfer_len = 1; ++ } else if (xfer_len > AST2600_I2C_DMA_SIZE) { ++ xfer_len = AST2600_I2C_DMA_SIZE; ++ } else if (i2c_bus->msgs_index + 1 == i2c_bus->msgs_count) { ++ cmd |= CONTROLLER_TRIGGER_LAST_STOP; ++ } ++ ++ writel(AST2600_I2CM_SET_RX_DMA_LEN(xfer_len - 1), i2c_bus->reg_base + AST2600_I2CM_DMA_LEN); ++ ++ if (cmd & AST2600_I2CM_START_CMD) { ++ cmd |= AST2600_I2CM_PKT_ADDR(msg->addr); ++ i2c_bus->controller_safe_buf = i2c_get_dma_safe_msg_buf(msg, 1); ++ if (!i2c_bus->controller_safe_buf) ++ return -ENOMEM; ++ if (msg->flags & I2C_M_RECV_LEN) ++ i2c_bus->controller_dma_addr = ++ dma_map_single(i2c_bus->dev, i2c_bus->controller_safe_buf, ++ I2C_SMBUS_BLOCK_MAX + 3, DMA_FROM_DEVICE); ++ else ++ i2c_bus->controller_dma_addr = ++ dma_map_single(i2c_bus->dev, i2c_bus->controller_safe_buf, ++ msg->len, DMA_FROM_DEVICE); ++ ret = dma_mapping_error(i2c_bus->dev, i2c_bus->controller_dma_addr); ++ if (ret) { ++ i2c_put_dma_safe_msg_buf(i2c_bus->controller_safe_buf, msg, false); ++ i2c_bus->controller_safe_buf = NULL; ++ return -ENOMEM; ++ } ++ } ++ ++ writel(lower_32_bits(i2c_bus->controller_dma_addr + ++ i2c_bus->controller_xfer_cnt), ++ i2c_bus->reg_base + AST2600_I2CM_RX_DMA); ++ writel(upper_32_bits(i2c_bus->controller_dma_addr + ++ i2c_bus->controller_xfer_cnt), ++ i2c_bus->reg_base + AST2600_I2CM_RX_DMA_H); ++ ++ writel(cmd, i2c_bus->reg_base + AST2600_I2CM_CMD_STS); ++ ++ return 0; ++} ++ ++static int ast2600_i2c_setup_buff_rx(u32 cmd, struct ast2600_i2c_bus *i2c_bus) ++{ ++ struct i2c_msg *msg = &i2c_bus->msgs[i2c_bus->msgs_index]; ++ int xfer_len = 
msg->len - i2c_bus->controller_xfer_cnt; ++ ++ cmd |= AST2600_I2CM_PKT_EN | AST2600_I2CM_RX_BUFF_EN | AST2600_I2CM_RX_CMD; ++ ++ if (cmd & AST2600_I2CM_START_CMD) ++ cmd |= AST2600_I2CM_PKT_ADDR(msg->addr); ++ ++ if (msg->flags & I2C_M_RECV_LEN) { ++ dev_dbg(i2c_bus->dev, "smbus read\n"); ++ xfer_len = 1; ++ } else if (xfer_len > i2c_bus->buf_size) { ++ xfer_len = i2c_bus->buf_size; ++ } else if (i2c_bus->msgs_index + 1 == i2c_bus->msgs_count) { ++ cmd |= CONTROLLER_TRIGGER_LAST_STOP; ++ } ++ ++ writel(AST2600_I2CC_SET_RX_BUF_LEN(xfer_len), i2c_bus->reg_base + AST2600_I2CC_BUFF_CTRL); ++ ++ writel(cmd, i2c_bus->reg_base + AST2600_I2CM_CMD_STS); ++ ++ return 0; ++} ++ ++static int ast2600_i2c_setup_byte_rx(u32 cmd, struct ast2600_i2c_bus *i2c_bus) ++{ ++ struct i2c_msg *msg = &i2c_bus->msgs[i2c_bus->msgs_index]; ++ ++ cmd |= AST2600_I2CM_PKT_EN | AST2600_I2CM_RX_CMD; ++ ++ if (cmd & AST2600_I2CM_START_CMD) ++ cmd |= AST2600_I2CM_PKT_ADDR(msg->addr); ++ ++ if (msg->flags & I2C_M_RECV_LEN) { ++ dev_dbg(i2c_bus->dev, "smbus read\n"); ++ } else if ((i2c_bus->msgs_index + 1 == i2c_bus->msgs_count) && ++ ((i2c_bus->controller_xfer_cnt + 1) == msg->len)) { ++ cmd |= CONTROLLER_TRIGGER_LAST_STOP; ++ } ++ ++ writel(cmd, i2c_bus->reg_base + AST2600_I2CM_CMD_STS); ++ ++ return 0; ++} ++ ++static int ast2600_i2c_do_start(struct ast2600_i2c_bus *i2c_bus) ++{ ++ struct i2c_msg *msg = &i2c_bus->msgs[i2c_bus->msgs_index]; ++ ++ /* send start */ ++ dev_dbg(i2c_bus->dev, "[%d] %sing %d byte%s %s 0x%02x\n", ++ i2c_bus->msgs_index, msg->flags & I2C_M_RD ? "read" : "write", ++ msg->len, msg->len > 1 ? "s" : "", ++ msg->flags & I2C_M_RD ? "from" : "to", msg->addr); ++ ++ i2c_bus->controller_xfer_cnt = 0; ++ i2c_bus->buf_index = 0; ++ ++ if (msg->flags & I2C_M_RD) { ++ if (i2c_bus->mode == DMA_MODE) ++ return ast2600_i2c_setup_dma_rx(AST2600_I2CM_START_CMD, i2c_bus); ++ else if (i2c_bus->mode == BUFF_MODE) ++ return ast2600_i2c_setup_buff_rx(AST2600_I2CM_START_CMD, i2c_bus); ++ else ++ return ast2600_i2c_setup_byte_rx(AST2600_I2CM_START_CMD, i2c_bus); ++ } else { ++ if (i2c_bus->mode == DMA_MODE) ++ return ast2600_i2c_setup_dma_tx(AST2600_I2CM_START_CMD, i2c_bus); ++ else if (i2c_bus->mode == BUFF_MODE) ++ return ast2600_i2c_setup_buff_tx(AST2600_I2CM_START_CMD, i2c_bus); ++ else ++ return ast2600_i2c_setup_byte_tx(AST2600_I2CM_START_CMD, i2c_bus); ++ } ++} ++ ++static int ast2700_i2c_irq_err_to_errno(u32 irq_status) ++{ ++ if (irq_status & AST2700_I2CM_ABNORMAL_ACTION) ++ return -EAGAIN; ++ if (irq_status & (AST2600_I2CM_SDA_DL_TO | AST2600_I2CM_SCL_LOW_TO)) ++ return -EBUSY; ++ if (irq_status & (AST2600_I2CM_ABNORMAL)) ++ return -EPROTO; ++ ++ return 0; ++} ++ ++static int ast2600_i2c_irq_err_to_errno(u32 irq_status) ++{ ++ if (irq_status & AST2600_I2CM_ARBIT_LOSS) ++ return -EAGAIN; ++ if (irq_status & (AST2600_I2CM_SDA_DL_TO | AST2600_I2CM_SCL_LOW_TO)) ++ return -EBUSY; ++ if (irq_status & (AST2600_I2CM_ABNORMAL)) ++ return -EPROTO; ++ ++ return 0; ++} ++ ++static void ast2600_i2c_controller_package_irq(struct ast2600_i2c_bus *i2c_bus, u32 sts) ++{ ++ struct i2c_msg *msg = &i2c_bus->msgs[i2c_bus->msgs_index]; ++ int xfer_len; ++ int i; ++ ++ if (i2c_bus->version == AST2700) ++ writel(sts, i2c_bus->reg_base + AST2600_I2CM_ISR); ++ else ++ writel(AST2600_I2CM_PKT_DONE, i2c_bus->reg_base + AST2600_I2CM_ISR); ++ ++ sts &= ~(AST2600_I2CM_PKT_DONE | AST2600_I2CM_SW_ISR_MASK); ++ ++ switch (sts) { ++ case AST2600_I2CM_PKT_ERROR: ++ i2c_bus->cmd_err = -EAGAIN; ++ complete(&i2c_bus->cmd_complete); ++ break; ++ case 
AST2600_I2CM_PKT_ERROR | AST2600_I2CM_TX_NAK: /* a0 fix for issue */ ++ fallthrough; ++ case AST2600_I2CM_PKT_ERROR | AST2600_I2CM_TX_NAK | AST2600_I2CM_NORMAL_STOP: ++ i2c_bus->cmd_err = -ENXIO; ++ complete(&i2c_bus->cmd_complete); ++ break; ++ case AST2600_I2CM_NORMAL_STOP: ++ /* a zero-byte write only generates a stop isr */ ++ i2c_bus->msgs_index++; ++ if (i2c_bus->msgs_index < i2c_bus->msgs_count) { ++ if (ast2600_i2c_do_start(i2c_bus)) { ++ i2c_bus->cmd_err = -ENOMEM; ++ complete(&i2c_bus->cmd_complete); ++ } ++ } else { ++ i2c_bus->cmd_err = i2c_bus->msgs_index; ++ complete(&i2c_bus->cmd_complete); ++ } ++ break; ++ case AST2600_I2CM_TX_ACK: ++ case AST2600_I2CM_TX_ACK | AST2600_I2CM_NORMAL_STOP: ++ if (i2c_bus->mode == DMA_MODE) ++ xfer_len = AST2600_I2C_GET_TX_DMA_LEN(readl(i2c_bus->reg_base + ++ AST2600_I2CM_DMA_LEN_STS)); ++ else if (i2c_bus->mode == BUFF_MODE) ++ xfer_len = AST2600_I2CC_GET_TX_BUF_LEN(readl(i2c_bus->reg_base + ++ AST2600_I2CC_BUFF_CTRL)); ++ else ++ xfer_len = 1; ++ ++ i2c_bus->controller_xfer_cnt += xfer_len; ++ ++ if (i2c_bus->controller_xfer_cnt == msg->len) { ++ if (i2c_bus->mode == DMA_MODE) { ++ dma_unmap_single(i2c_bus->dev, i2c_bus->controller_dma_addr, msg->len, ++ DMA_TO_DEVICE); ++ i2c_put_dma_safe_msg_buf(i2c_bus->controller_safe_buf, msg, true); ++ i2c_bus->controller_safe_buf = NULL; ++ } ++ i2c_bus->msgs_index++; ++ if (i2c_bus->msgs_index == i2c_bus->msgs_count) { ++ i2c_bus->cmd_err = i2c_bus->msgs_index; ++ complete(&i2c_bus->cmd_complete); ++ } else { ++ if (ast2600_i2c_do_start(i2c_bus)) { ++ i2c_bus->cmd_err = -ENOMEM; ++ complete(&i2c_bus->cmd_complete); ++ } ++ } ++ } else { ++ if (i2c_bus->mode == DMA_MODE) ++ ast2600_i2c_setup_dma_tx(0, i2c_bus); ++ else if (i2c_bus->mode == BUFF_MODE) ++ ast2600_i2c_setup_buff_tx(0, i2c_bus); ++ else ++ ast2600_i2c_setup_byte_tx(0, i2c_bus); ++ } ++ break; ++ case AST2600_I2CM_RX_DONE: ++#if IS_ENABLED(CONFIG_I2C_SLAVE) ++ /* ++ * Workaround for the rx done stuck issue in controller/target package mode. ++ * When the controller issues its first read (RX_DONE), target mode is also ++ * affected; the controller then sends a NACK and stops operating. ++ */ ++ if (readl(i2c_bus->reg_base + AST2600_I2CS_CMD_STS) & AST2600_I2CS_PKT_MODE_EN) { ++ u32 target_cmd = readl(i2c_bus->reg_base + AST2600_I2CS_CMD_STS); ++ ++ writel(0, i2c_bus->reg_base + AST2600_I2CS_CMD_STS); ++ writel(target_cmd, i2c_bus->reg_base + AST2600_I2CS_CMD_STS); ++ } ++ fallthrough; ++#endif ++ case AST2600_I2CM_RX_DONE | AST2600_I2CM_NORMAL_STOP: ++ /* do next rx */ ++ if (i2c_bus->mode == DMA_MODE) { ++ xfer_len = AST2600_I2C_GET_RX_DMA_LEN(readl(i2c_bus->reg_base + ++ AST2600_I2CM_DMA_LEN_STS)); ++ } else if (i2c_bus->mode == BUFF_MODE) { ++ xfer_len = AST2600_I2CC_GET_RX_BUF_LEN(readl(i2c_bus->reg_base + ++ AST2600_I2CC_BUFF_CTRL)); ++ for (i = 0; i < xfer_len; i++) ++ msg->buf[i2c_bus->controller_xfer_cnt + i] = ++ readb(i2c_bus->buf_base + 0x10 + i); ++ } else { ++ xfer_len = 1; ++ msg->buf[i2c_bus->controller_xfer_cnt] = ++ AST2600_I2CC_GET_RX_BUFF(readl(i2c_bus->reg_base + ++ AST2600_I2CC_STS_AND_BUFF)); ++ } ++ ++ if (msg->flags & I2C_M_RECV_LEN) { ++ u8 recv_len = 0; ++ ++ if (i2c_bus->version == AST2700) { ++ recv_len = AST2700_I2CC_GET_BUFF(readl(i2c_bus->reg_base ++ + BYTE_DATA_LOG)); ++ } else { ++ recv_len = AST2600_I2CC_GET_RX_BUFF(readl(i2c_bus->reg_base ++ + AST2600_I2CC_STS_AND_BUFF)); ++ } ++ ++ msg->len = min_t(unsigned int, recv_len, I2C_SMBUS_BLOCK_MAX); ++ msg->len += ((msg->flags & I2C_CLIENT_PEC) ? 
2 : 1); ++ msg->flags &= ~I2C_M_RECV_LEN; ++ if (!recv_len) ++ i2c_bus->controller_xfer_cnt = 0; ++ else ++ i2c_bus->controller_xfer_cnt = 1; ++ } else { ++ i2c_bus->controller_xfer_cnt += xfer_len; ++ } ++ ++ if (i2c_bus->controller_xfer_cnt == msg->len) { ++ if (i2c_bus->mode == DMA_MODE) { ++ dma_unmap_single(i2c_bus->dev, i2c_bus->controller_dma_addr, msg->len, ++ DMA_FROM_DEVICE); ++ i2c_put_dma_safe_msg_buf(i2c_bus->controller_safe_buf, msg, true); ++ i2c_bus->controller_safe_buf = NULL; ++ } ++ ++ i2c_bus->msgs_index++; ++ if (i2c_bus->msgs_index == i2c_bus->msgs_count) { ++ i2c_bus->cmd_err = i2c_bus->msgs_index; ++ complete(&i2c_bus->cmd_complete); ++ } else { ++ if (ast2600_i2c_do_start(i2c_bus)) { ++ i2c_bus->cmd_err = -ENOMEM; ++ complete(&i2c_bus->cmd_complete); ++ } ++ } ++ } else { ++ if (i2c_bus->mode == DMA_MODE) ++ ast2600_i2c_setup_dma_rx(0, i2c_bus); ++ else if (i2c_bus->mode == BUFF_MODE) ++ ast2600_i2c_setup_buff_rx(0, i2c_bus); ++ else ++ ast2600_i2c_setup_byte_rx(0, i2c_bus); ++ } ++ break; ++ default: ++ dev_dbg(i2c_bus->dev, "unhandled sts %x\n", sts); ++ break; ++ } ++} ++ ++static int ast2600_i2c_controller_irq(struct ast2600_i2c_bus *i2c_bus) ++{ ++ u32 sts = readl(i2c_bus->reg_base + AST2600_I2CM_ISR); ++ u32 ier = readl(i2c_bus->reg_base + AST2600_I2CM_IER); ++ u32 ctrl; ++ ++ /* mask un-used isr bits */ ++ sts &= ~AST2600_I2CM_ISR_MASK; ++ ++ if (!i2c_bus->alert_enable) ++ sts &= ~AST2600_I2CM_SMBUS_ALT; ++ ++ if (AST2600_I2CM_BUS_RECOVER_FAIL & sts) { ++ writel(AST2600_I2CM_BUS_RECOVER_FAIL, i2c_bus->reg_base + AST2600_I2CM_ISR); ++ ctrl = readl(i2c_bus->reg_base + AST2600_I2CC_FUN_CTRL); ++ writel(0, i2c_bus->reg_base + AST2600_I2CC_FUN_CTRL); ++ writel(ctrl, i2c_bus->reg_base + AST2600_I2CC_FUN_CTRL); ++ i2c_bus->cmd_err = -EPROTO; ++ complete(&i2c_bus->cmd_complete); ++ return 1; ++ } ++ ++ if (AST2600_I2CM_BUS_RECOVER & sts) { ++ writel(AST2600_I2CM_BUS_RECOVER, i2c_bus->reg_base + AST2600_I2CM_ISR); ++ i2c_bus->cmd_err = 0; ++ complete(&i2c_bus->cmd_complete); ++ return 1; ++ } ++ ++ if (AST2600_I2CM_SMBUS_ALT & sts) { ++ if (ier & AST2600_I2CM_SMBUS_ALT) { ++ /* Disable ALT INT */ ++ writel(ier & ~AST2600_I2CM_SMBUS_ALT, i2c_bus->reg_base + AST2600_I2CM_IER); ++ i2c_handle_smbus_alert(i2c_bus->ara); ++ writel(AST2600_I2CM_SMBUS_ALT, i2c_bus->reg_base + AST2600_I2CM_ISR); ++ dev_err(i2c_bus->dev, ++ "ast2600_controller_alert_recv bus id %d, Disable Alt, Please Imple\n", ++ i2c_bus->adap.nr); ++ return 1; ++ } ++ } ++ ++ /* handle controller abnormal condition */ ++ if (i2c_bus->version == AST2700) { ++ i2c_bus->cmd_err = ast2700_i2c_irq_err_to_errno(sts); ++ if (i2c_bus->cmd_err) { ++ writel(sts, i2c_bus->reg_base + AST2600_I2CM_ISR); ++ complete(&i2c_bus->cmd_complete); ++ return 1; ++ } ++ } else { ++ i2c_bus->cmd_err = ast2600_i2c_irq_err_to_errno(sts); ++ if (i2c_bus->cmd_err) { ++ writel(AST2600_I2CM_PKT_DONE, i2c_bus->reg_base + AST2600_I2CM_ISR); ++ complete(&i2c_bus->cmd_complete); ++ return 1; ++ } ++ } ++ ++ if (AST2600_I2CM_PKT_DONE & sts) { ++ ast2600_i2c_controller_package_irq(i2c_bus, sts); ++ return 1; ++ } ++ ++ return 0; ++} ++ ++static irqreturn_t ast2600_i2c_bus_irq(int irq, void *dev_id) ++{ ++ struct ast2600_i2c_bus *i2c_bus = dev_id; ++ ++#if IS_ENABLED(CONFIG_I2C_SLAVE) ++ if (readl(i2c_bus->reg_base + AST2600_I2CC_FUN_CTRL) & AST2600_I2CC_SLAVE_EN) { ++ if (ast2600_i2c_target_irq(i2c_bus)) ++ return IRQ_HANDLED; ++ } ++#endif ++ return IRQ_RETVAL(ast2600_i2c_controller_irq(i2c_bus)); ++} ++ ++static int 
ast2600_i2c_controller_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) ++{ ++ struct ast2600_i2c_bus *i2c_bus = i2c_get_adapdata(adap); ++ unsigned long timeout; ++ int ret; ++ ++ if (!i2c_bus->multi_master && ++ (readl(i2c_bus->reg_base + AST2600_I2CC_STS_AND_BUFF) & AST2600_I2CC_BUS_BUSY_STS)) { ++ ret = ast2600_i2c_recover_bus(i2c_bus); ++ if (ret) ++ return ret; ++ } ++ ++#if IS_ENABLED(CONFIG_I2C_SLAVE) ++ if (i2c_bus->mode == BUFF_MODE) { ++ if (i2c_bus->target_operate) ++ return -EBUSY; ++ /* disable target isr */ ++ writel(0, i2c_bus->reg_base + AST2600_I2CS_IER); ++ if (readl(i2c_bus->reg_base + AST2600_I2CS_ISR) || i2c_bus->target_operate) { ++ writel(AST2600_I2CS_PKT_DONE, i2c_bus->reg_base + AST2600_I2CS_IER); ++ return -EBUSY; ++ } ++ } ++#endif ++ ++ i2c_bus->cmd_err = 0; ++ i2c_bus->msgs = msgs; ++ i2c_bus->msgs_index = 0; ++ i2c_bus->msgs_count = num; ++ reinit_completion(&i2c_bus->cmd_complete); ++ ret = ast2600_i2c_do_start(i2c_bus); ++#if IS_ENABLED(CONFIG_I2C_SLAVE) ++ /* avoid race condication target is wait and controller wait 1st target operate */ ++ if (i2c_bus->mode == BUFF_MODE) ++ writel(AST2600_I2CS_PKT_DONE, i2c_bus->reg_base + AST2600_I2CS_IER); ++#endif ++ if (ret) ++ goto controller_out; ++ timeout = wait_for_completion_timeout(&i2c_bus->cmd_complete, i2c_bus->adap.timeout); ++ if (timeout == 0) { ++ u32 ctrl = readl(i2c_bus->reg_base + AST2600_I2CC_FUN_CTRL); ++ ++ dev_dbg(i2c_bus->dev, "timeout isr[%x], sts[%x]\n", ++ readl(i2c_bus->reg_base + AST2600_I2CM_ISR), ++ readl(i2c_bus->reg_base + AST2600_I2CC_STS_AND_BUFF)); ++ writel(0, i2c_bus->reg_base + AST2600_I2CC_FUN_CTRL); ++ writel(ctrl, i2c_bus->reg_base + AST2600_I2CC_FUN_CTRL); ++ if (i2c_bus->multi_master && ++ (readl(i2c_bus->reg_base + AST2600_I2CC_STS_AND_BUFF) & ++ AST2600_I2CC_BUS_BUSY_STS)) ++ ast2600_i2c_recover_bus(i2c_bus); ++#if IS_ENABLED(CONFIG_I2C_SLAVE) ++ if (ctrl & AST2600_I2CC_SLAVE_EN) { ++ u32 cmd = TARGET_TRIGGER_CMD; ++ ++ if (i2c_bus->mode == DMA_MODE) { ++ cmd |= AST2600_I2CS_RX_DMA_EN; ++ writel(lower_32_bits(i2c_bus->target_dma_addr), ++ i2c_bus->reg_base + AST2600_I2CS_RX_DMA); ++ writel(upper_32_bits(i2c_bus->target_dma_addr), ++ i2c_bus->reg_base + AST2600_I2CS_RX_DMA_H); ++ writel(lower_32_bits(i2c_bus->target_dma_addr), ++ i2c_bus->reg_base + AST2600_I2CS_TX_DMA); ++ writel(upper_32_bits(i2c_bus->target_dma_addr), ++ i2c_bus->reg_base + AST2600_I2CS_TX_DMA_H); ++ writel(AST2600_I2CS_SET_RX_DMA_LEN(I2C_TARGET_MSG_BUF_SIZE), ++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN); ++ } else if (i2c_bus->mode == BUFF_MODE) { ++ cmd = TARGET_TRIGGER_CMD; ++ } else { ++ cmd &= ~AST2600_I2CS_PKT_MODE_EN; ++ } ++ writel(cmd, i2c_bus->reg_base + AST2600_I2CS_CMD_STS); ++ } ++#endif ++ ret = -ETIMEDOUT; ++ } else { ++ ret = i2c_bus->cmd_err; ++ } ++ ++ dev_dbg(i2c_bus->dev, "bus%d-m: %d end\n", i2c_bus->adap.nr, i2c_bus->cmd_err); ++ ++controller_out: ++ if (i2c_bus->mode == DMA_MODE) { ++ /* still have controller_safe_buf need to be released */ ++ if (i2c_bus->controller_safe_buf) { ++ struct i2c_msg *msg = &i2c_bus->msgs[i2c_bus->msgs_index]; ++ ++ if (msg->flags & I2C_M_RD) ++ dma_unmap_single(i2c_bus->dev, i2c_bus->controller_dma_addr, msg->len, ++ DMA_FROM_DEVICE); ++ else ++ dma_unmap_single(i2c_bus->dev, i2c_bus->controller_dma_addr, msg->len, ++ DMA_TO_DEVICE); ++ i2c_put_dma_safe_msg_buf(i2c_bus->controller_safe_buf, msg, true); ++ i2c_bus->controller_safe_buf = NULL; ++ } ++ } ++ ++ return ret; ++} ++ ++static void ast2600_i2c_init(struct ast2600_i2c_bus *i2c_bus) 
++{ ++ struct platform_device *pdev = to_platform_device(i2c_bus->dev); ++ u32 fun_ctrl = AST2600_I2CC_BUS_AUTO_RELEASE | AST2600_I2CC_MASTER_EN; ++ ++ /* I2C Reset */ ++ writel(0, i2c_bus->reg_base + AST2600_I2CC_FUN_CTRL); ++ ++ i2c_bus->multi_master = device_property_read_bool(&pdev->dev, "multi-master"); ++ if (!i2c_bus->multi_master) ++ fun_ctrl |= AST2600_I2CC_MULTI_MASTER_DIS; ++ ++ /* I2C Debounce level */ ++ if (i2c_bus->version != AST2600) { ++ if (!device_property_read_u32(&pdev->dev, "debounce-level", ++ &i2c_bus->debounce_level)) { ++ u32 debounce_level = 0; ++ ++ /* AST2700 support manual debounce setting */ ++ if (i2c_bus->version == AST2700) { ++ if (i2c_bus->debounce_level > AST2700_DEBOUNCE_LEVEL_MAX) ++ i2c_bus->debounce_level = AST2700_DEBOUNCE_LEVEL_MAX; ++ if (i2c_bus->debounce_level < AST2700_DEBOUNCE_LEVEL_MIN) ++ i2c_bus->debounce_level = AST2700_DEBOUNCE_LEVEL_MIN; ++ } ++ ++ debounce_level = readl(i2c_bus->reg_base + MSIC2_CONFIG) ++ & ~(AST2700_DEBOUNCE_MASK); ++ ++ debounce_level |= i2c_bus->debounce_level; ++ writel(debounce_level, i2c_bus->reg_base + MSIC2_CONFIG); ++ ++ fun_ctrl |= AST2700_I2CC_MANUAL_DEBOUNCE; ++ } ++ } ++ ++ /* Enable Controller Mode */ ++ writel(fun_ctrl, i2c_bus->reg_base + AST2600_I2CC_FUN_CTRL); ++ /* Disable Target Address */ ++ writel(0, i2c_bus->reg_base + AST2600_I2CS_ADDR_CTRL); ++ ++ /* Set AC Timing */ ++ if (i2c_bus->version == AST2700) ++ ast2700_i2c_ac_timing_config(i2c_bus); ++ else ++ ast2600_i2c_ac_timing_config(i2c_bus); ++ ++ /* Clear Interrupt */ ++ writel(GENMASK(27, 0), i2c_bus->reg_base + AST2600_I2CM_ISR); ++ ++#if IS_ENABLED(CONFIG_I2C_SLAVE) ++ /* for memory buffer initial */ ++ if (i2c_bus->mode == DMA_MODE) { ++ i2c_bus->target_dma_buf = ++ dmam_alloc_coherent(i2c_bus->dev, I2C_TARGET_MSG_BUF_SIZE, ++ &i2c_bus->target_dma_addr, GFP_KERNEL); ++ if (!i2c_bus->target_dma_buf) ++ return; ++ } ++ ++ writel(GENMASK(27, 0), i2c_bus->reg_base + AST2600_I2CS_ISR); ++ ++ if (i2c_bus->mode == BYTE_MODE) ++ writel(GENMASK(15, 0), i2c_bus->reg_base + AST2600_I2CS_IER); ++ else ++ writel(AST2600_I2CS_PKT_DONE, i2c_bus->reg_base + AST2600_I2CS_IER); ++#endif ++} ++ ++#if IS_ENABLED(CONFIG_I2C_SLAVE) ++static int ast2600_i2c_reg_target(struct i2c_client *client) ++{ ++ struct ast2600_i2c_bus *i2c_bus = i2c_get_adapdata(client->adapter); ++ u32 cmd = TARGET_TRIGGER_CMD; ++ u32 target_addr = readl(i2c_bus->reg_base + AST2600_I2CS_ADDR_CTRL); ++ u32 ctrl = readl(i2c_bus->reg_base + AST2600_I2CC_FUN_CTRL); ++ bool target_reg = false; ++ u8 i = 0; ++ ++ /* check target client input and target counts*/ ++ if (!client || i2c_bus->target_attached == AST2600_I2C_TARGET_COUNT) ++ return -EINVAL; ++ ++ /* check duplicate address */ ++ for (i = 0; i < AST2600_I2C_TARGET_COUNT; i++) { ++ if (i2c_bus->multi_target[i]) { ++ if (i2c_bus->multi_target[i]->addr == client->addr) { ++ dev_dbg(i2c_bus->dev, "duplicate address [%x] on %d\n", client->addr, i); ++ return -EINVAL; ++ } ++ } ++ } ++ ++ /* assign the target address into array */ ++ for (i = 0; i < AST2600_I2C_TARGET_COUNT; i++) { ++ if (!i2c_bus->multi_target[i]) { ++ i2c_bus->multi_target[i] = client; ++ target_reg = true; ++ dev_dbg(i2c_bus->dev, "reg [%x] on %d\n", client->addr, i); ++ ++ /* set target addr by index */ ++ switch (i) { ++ case 0: ++ target_addr &= ~(AST2600_I2CS_ADDR1_MASK); ++ target_addr |= (AST2600_I2CS_ADDR1(client->addr) ++ | AST2600_I2CS_ADDR1_ENABLE); ++ break; ++ case 1: ++ target_addr &= ~(AST2600_I2CS_ADDR2_MASK); ++ target_addr |= 
(AST2600_I2CS_ADDR2(client->addr) ++ | AST2600_I2CS_ADDR2_ENABLE); ++ break; ++ case 2: ++ target_addr &= ~(AST2600_I2CS_ADDR3_MASK); ++ target_addr |= (AST2600_I2CS_ADDR3(client->addr) ++ | AST2600_I2CS_ADDR3_ENABLE); ++ break; ++ } ++ ++ /* Set target addr. */ ++ writel(target_addr, i2c_bus->reg_base + AST2600_I2CS_ADDR_CTRL); ++ break; ++ } ++ } ++ ++ /* don't reg target */ ++ if (!target_reg) ++ return -EINVAL; ++ ++ dev_dbg(i2c_bus->dev, "target addr %x\n", client->addr); ++ ++ /* turn on target mode */ ++ if (!(ctrl & AST2600_I2CC_SLAVE_EN)) { ++ writel(ctrl | AST2600_I2CC_SLAVE_EN, ++ i2c_bus->reg_base + AST2600_I2CC_FUN_CTRL); ++ } ++ ++ /* trigger rx buffer */ ++ if (i2c_bus->mode == DMA_MODE) { ++ cmd |= AST2600_I2CS_RX_DMA_EN; ++ writel(lower_32_bits(i2c_bus->target_dma_addr), i2c_bus->reg_base + AST2600_I2CS_RX_DMA); ++ writel(upper_32_bits(i2c_bus->target_dma_addr), i2c_bus->reg_base + AST2600_I2CS_RX_DMA_H); ++ writel(lower_32_bits(i2c_bus->target_dma_addr), i2c_bus->reg_base + AST2600_I2CS_TX_DMA); ++ writel(upper_32_bits(i2c_bus->target_dma_addr), i2c_bus->reg_base + AST2600_I2CS_TX_DMA_H); ++ writel(AST2600_I2CS_SET_RX_DMA_LEN(I2C_TARGET_MSG_BUF_SIZE), ++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN); ++ } else if (i2c_bus->mode == BUFF_MODE) { ++ cmd = TARGET_TRIGGER_CMD; ++ } else { ++ cmd &= ~AST2600_I2CS_PKT_MODE_EN; ++ } ++ ++ writel(cmd, i2c_bus->reg_base + AST2600_I2CS_CMD_STS); ++ ++ i2c_bus->target_attached++; ++ return 0; ++} ++ ++static int ast2600_i2c_unreg_target(struct i2c_client *client) ++{ ++ struct ast2600_i2c_bus *i2c_bus = i2c_get_adapdata(client->adapter); ++ u32 target_addr = readl(i2c_bus->reg_base + AST2600_I2CS_ADDR_CTRL); ++ bool target_unreg = false; ++ u8 i = 0; ++ ++ /* check target client input and salve counts*/ ++ if (!client || i2c_bus->target_attached == 0) ++ return -EINVAL; ++ ++ /* remove the target call back from array */ ++ for (i = 0; i < AST2600_I2C_TARGET_COUNT; i++) { ++ if (i2c_bus->multi_target[i]) { ++ if (i2c_bus->multi_target[i]->addr == client->addr) { ++ i2c_bus->multi_target[i] = NULL; ++ target_unreg = true; ++ dev_dbg(i2c_bus->dev, "un-reg [%x] from %d\n", client->addr, i); ++ ++ /* remove target addr by index */ ++ switch (i) { ++ case 0: ++ target_addr &= ~(AST2600_I2CS_ADDR1_MASK | AST2600_I2CS_ADDR1_ENABLE); ++ break; ++ case 1: ++ target_addr &= ~(AST2600_I2CS_ADDR2_MASK | AST2600_I2CS_ADDR2_ENABLE); ++ break; ++ case 2: ++ target_addr &= ~(AST2600_I2CS_ADDR3_MASK | AST2600_I2CS_ADDR3_ENABLE); ++ break; ++ } ++ ++ writel(target_addr, i2c_bus->reg_base + AST2600_I2CS_ADDR_CTRL); ++ break; ++ } ++ } ++ } ++ ++ /* don't un-reg target */ ++ if (!target_unreg) ++ return -EINVAL; ++ ++ i2c_bus->target_attached--; ++ ++ /* Turn off target mode */ ++ if (i2c_bus->target_attached == 0x0) { ++ writel(~AST2600_I2CC_SLAVE_EN & readl(i2c_bus->reg_base + AST2600_I2CC_FUN_CTRL), ++ i2c_bus->reg_base + AST2600_I2CC_FUN_CTRL); ++ } ++ ++ return 0; ++} ++#endif ++ ++static u32 ast2600_i2c_functionality(struct i2c_adapter *adap) ++{ ++ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_SMBUS_BLOCK_DATA; ++} ++ ++static const struct i2c_algorithm i2c_ast2600_algorithm = { ++ .xfer = ast2600_i2c_controller_xfer, ++ .functionality = ast2600_i2c_functionality, ++#if IS_ENABLED(CONFIG_I2C_SLAVE) ++ .reg_target = ast2600_i2c_reg_target, ++ .unreg_target = ast2600_i2c_unreg_target, ++#endif ++}; ++ ++static int ast2600_i2c_probe(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ struct ast2600_i2c_bus *i2c_bus; ++ const 
char *xfer_mode; ++ struct resource *res; ++ u32 global_ctrl; ++ int ret; ++ ++ i2c_bus = devm_kzalloc(dev, sizeof(*i2c_bus), GFP_KERNEL); ++ if (!i2c_bus) ++ return -ENOMEM; ++ ++ i2c_bus->version = (enum i2c_version)device_get_match_data(dev); ++ ++ i2c_bus->reg_base = devm_platform_ioremap_resource(pdev, 0); ++ if (IS_ERR(i2c_bus->reg_base)) ++ return PTR_ERR(i2c_bus->reg_base); ++ ++ i2c_bus->rst = devm_reset_control_get_shared(dev, NULL); ++ if (IS_ERR(i2c_bus->rst)) ++ return dev_err_probe(dev, PTR_ERR(i2c_bus->rst), "Missing reset ctrl\n"); ++ ++ reset_control_deassert(i2c_bus->rst); ++ ++ i2c_bus->global_regs = ++ syscon_regmap_lookup_by_phandle(dev_of_node(dev), "aspeed,global-regs"); ++ if (IS_ERR(i2c_bus->global_regs)) ++ return PTR_ERR(i2c_bus->global_regs); ++ ++ regmap_read(i2c_bus->global_regs, AST2600_I2CG_CTRL, &global_ctrl); ++ if ((global_ctrl & AST2600_GLOBAL_INIT) != AST2600_GLOBAL_INIT) { ++ regmap_write(i2c_bus->global_regs, AST2600_I2CG_CTRL, AST2600_GLOBAL_INIT); ++ if (i2c_bus->version == AST2600) ++ regmap_write(i2c_bus->global_regs, AST2600_I2CG_CLK_DIV_CTRL, AST2600_I2CCG_DIV_CTRL); ++ else ++ regmap_write(i2c_bus->global_regs, AST2600_I2CG_CLK_DIV_CTRL, AST2700_I2CCG_DIV_CTRL); ++ } ++ ++#if IS_ENABLED(CONFIG_I2C_SLAVE) ++ i2c_bus->target_operate = 0; ++ i2c_bus->target_attached = 0; ++ for (int i = 0; i < AST2600_I2C_TARGET_COUNT; i++) ++ i2c_bus->multi_target[i] = NULL; ++#endif ++ i2c_bus->dev = dev; ++ if (i2c_bus->version == AST2600) { ++ i2c_bus->mode = BUFF_MODE; ++ ++ if (!device_property_read_string(dev, "aspeed,transfer-mode", &xfer_mode)) { ++ if (!strcmp(xfer_mode, "dma")) ++ i2c_bus->mode = DMA_MODE; ++ else if (!strcmp(xfer_mode, "byte")) ++ i2c_bus->mode = BYTE_MODE; ++ else ++ i2c_bus->mode = BUFF_MODE; ++ } ++ ++ if (i2c_bus->mode == BUFF_MODE) { ++ i2c_bus->buf_base = devm_platform_get_and_ioremap_resource(pdev, 1, &res); ++ if (IS_ERR(i2c_bus->buf_base)) ++ i2c_bus->mode = BYTE_MODE; ++ else ++ i2c_bus->buf_size = resource_size(res) / 2; ++ } ++ } else { ++ i2c_bus->mode = DMA_MODE; ++ } ++ ++ /* ++ * i2c timeout counter: use base clk4 1Mhz, ++ * per unit: 1/(1000/1024) = 1024us ++ * lower than 1024 us will be set 1 ms. 
++ */ ++ ret = device_property_read_u32(dev, "i2c-scl-clk-low-timeout-us", &i2c_bus->timeout); ++ if (!ret) { ++ i2c_bus->timeout /= 1024; ++ ++ if (!i2c_bus->timeout) ++ i2c_bus->timeout = 1; ++ } ++ ++ init_completion(&i2c_bus->cmd_complete); ++ ++ i2c_bus->irq = platform_get_irq(pdev, 0); ++ if (i2c_bus->irq < 0) ++ return i2c_bus->irq; ++ ++ platform_set_drvdata(pdev, i2c_bus); ++ ++ i2c_bus->clk = devm_clk_get(i2c_bus->dev, NULL); ++ if (IS_ERR(i2c_bus->clk)) ++ return dev_err_probe(i2c_bus->dev, PTR_ERR(i2c_bus->clk), "Can't get clock\n"); ++ ++ i2c_bus->apb_clk = clk_get_rate(i2c_bus->clk); ++ ++ i2c_parse_fw_timings(i2c_bus->dev, &i2c_bus->timing_info, true); ++ ++ /* Initialize the I2C adapter */ ++ i2c_bus->adap.owner = THIS_MODULE; ++ i2c_bus->adap.algo = &i2c_ast2600_algorithm; ++ i2c_bus->adap.retries = 0; ++ i2c_bus->adap.dev.parent = i2c_bus->dev; ++ device_set_node(&i2c_bus->adap.dev, dev_fwnode(dev)); ++ i2c_bus->adap.algo_data = i2c_bus; ++ strscpy(i2c_bus->adap.name, pdev->name, sizeof(i2c_bus->adap.name)); ++ i2c_set_adapdata(&i2c_bus->adap, i2c_bus); ++ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); ++ ++ ast2600_i2c_init(i2c_bus); ++ ++ ret = devm_request_irq(dev, i2c_bus->irq, ast2600_i2c_bus_irq, 0, ++ dev_name(dev), i2c_bus); ++ if (ret < 0) ++ return dev_err_probe(dev, ret, "Unable to request irq %d\n", i2c_bus->irq); ++ ++ /* Set interrupt generation of i2c controller */ ++ writel(AST2600_I2CM_PKT_DONE | AST2600_I2CM_BUS_RECOVER, ++ i2c_bus->reg_base + AST2600_I2CM_IER); ++ ++ ret = devm_i2c_add_adapter(dev, &i2c_bus->adap); ++ if (ret) ++ return ret; ++ ++ i2c_bus->alert_enable = device_property_read_bool(dev, "smbus-alert"); ++ if (i2c_bus->alert_enable) { ++ i2c_bus->ara = i2c_new_smbus_alert_device(&i2c_bus->adap, &i2c_bus->alert_data); ++ if (!i2c_bus->ara) ++ dev_warn(dev, "Failed to register ARA client\n"); ++ else ++ writel(AST2600_I2CM_PKT_DONE | AST2600_I2CM_BUS_RECOVER | ++ AST2600_I2CM_SMBUS_ALT, ++ i2c_bus->reg_base + AST2600_I2CM_IER); ++ } else { ++ i2c_bus->alert_enable = false; ++ } ++ ++ dev_info(dev, "%s [%d]: adapter [%d KHz] mode [%d] version [%d]\n", ++ dev->of_node->name, i2c_bus->adap.nr, i2c_bus->timing_info.bus_freq_hz / 1000, ++ i2c_bus->mode, i2c_bus->version); ++ ++ return 0; ++} ++ ++static void ast2600_i2c_remove(struct platform_device *pdev) ++{ ++ struct ast2600_i2c_bus *i2c_bus = platform_get_drvdata(pdev); ++ ++ /* Disable everything. 
*/ ++ writel(0, i2c_bus->reg_base + AST2600_I2CC_FUN_CTRL); ++ writel(0, i2c_bus->reg_base + AST2600_I2CM_IER); ++} ++ ++static const struct of_device_id ast2600_i2c_bus_of_table[] = { ++ { .compatible = "aspeed,ast2600-i2cv2", .data = (const void *)AST2600, }, ++ { .compatible = "aspeed,ast2700-i2c", .data = (const void *)AST2700, }, ++ {} ++}; ++MODULE_DEVICE_TABLE(of, ast2600_i2c_bus_of_table); ++ ++static struct platform_driver ast2600_i2c_bus_driver = { ++ .probe = ast2600_i2c_probe, ++ .remove_new = ast2600_i2c_remove, ++ .driver = { ++ .name = KBUILD_MODNAME, ++ .of_match_table = ast2600_i2c_bus_of_table, ++ }, ++}; ++ ++module_platform_driver(ast2600_i2c_bus_driver); ++ ++MODULE_AUTHOR("Ryan Chen "); ++MODULE_DESCRIPTION("ASPEED AST2600 I2C Controller Driver"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/i3c/master/ast2600-i3c-master.c b/drivers/i3c/master/ast2600-i3c-master.c +--- a/drivers/i3c/master/ast2600-i3c-master.c 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/i3c/master/ast2600-i3c-master.c 2025-12-23 10:16:21.088033273 +0000 +@@ -10,6 +10,8 @@ + #include + #include + #include ++#include ++#include + + #include "dw-i3c-master.h" + +@@ -33,18 +35,93 @@ + #define AST2600_I3CG_REG1_SA_EN BIT(15) + #define AST2600_I3CG_REG1_INST_ID_MASK GENMASK(19, 16) + #define AST2600_I3CG_REG1_INST_ID(x) (((x) << 16) & AST2600_I3CG_REG1_INST_ID_MASK) ++#define SCL_SW_MODE_OE BIT(20) ++#define SCL_OUT_SW_MODE_VAL BIT(21) ++#define SCL_IN_SW_MODE_VAL BIT(23) ++#define SDA_SW_MODE_OE BIT(24) ++#define SDA_OUT_SW_MODE_VAL BIT(25) ++#define SDA_IN_SW_MODE_VAL BIT(27) ++#define SCL_IN_SW_MODE_EN BIT(28) ++#define SDA_IN_SW_MODE_EN BIT(29) ++#define SCL_OUT_SW_MODE_EN BIT(30) ++#define SDA_OUT_SW_MODE_EN BIT(31) + + #define AST2600_DEFAULT_SDA_PULLUP_OHMS 2000 + ++#define DEV_ADDR_TABLE_LEGACY_I2C_DEV BIT(31) ++#define DEV_ADDR_TABLE_DYNAMIC_ADDR GENMASK(23, 16) ++#define DEV_ADDR_TABLE_IBI_ADDR_MASK GENMASK(25, 24) ++#define IBI_ADDR_MASK_OFF 0b00 ++#define IBI_ADDR_MASK_LAST_3BITS 0b01 ++#define IBI_ADDR_MASK_LAST_4BITS 0b10 ++#define DEV_ADDR_TABLE_DA_PARITY BIT(23) ++#define DEV_ADDR_TABLE_MR_REJECT BIT(14) ++#define DEV_ADDR_TABLE_SIR_REJECT BIT(13) ++#define DEV_ADDR_TABLE_IBI_MDB BIT(12) + #define DEV_ADDR_TABLE_IBI_PEC BIT(11) ++#define DEV_ADDR_TABLE_STATIC_ADDR GENMASK(6, 0) ++ ++#define DEV_ADDR_TABLE_LOC(start, idx) ((start) + ((idx) << 2)) ++ ++#define DEVICE_CTRL 0x0 ++#define DEV_CTRL_SLAVE_MDB GENMASK(23, 16) ++#define DEV_CTRL_HOT_JOIN_NACK BIT(8) ++ ++#define NUM_OF_SWDATS_IN_GROUP 8 ++#define ALL_DATS_IN_GROUP_ARE_FREE ((1 << NUM_OF_SWDATS_IN_GROUP) - 1) ++#define NUM_OF_SWDAT_GROUP 16 ++ ++#define ADDR_GRP_MASK GENMASK(6, 3) ++#define ADDR_GRP(x) (((x) & ADDR_GRP_MASK) >> 3) ++#define ADDR_HID_MASK GENMASK(2, 0) ++#define ADDR_HID(x) ((x) & ADDR_HID_MASK) ++ ++#define IBI_QUEUE_STATUS 0x18 ++ ++#define IBI_SIR_REQ_REJECT 0x30 ++#define INTR_STATUS_EN 0x40 ++#define INTR_SIGNAL_EN 0x44 ++#define INTR_IBI_THLD_STAT BIT(2) ++ ++#define PRESENT_STATE 0x54 ++#define CM_TFR_STS GENMASK(13, 8) ++#define CM_TFR_STS_MASTER_SERV_IBI 0xe ++#define SDA_LINE_SIGNAL_LEVEL BIT(1) ++#define SCL_LINE_SIGNAL_LEVEL BIT(0) ++ ++struct ast2600_i3c_swdat_group { ++ u32 dat[NUM_OF_SWDATS_IN_GROUP]; ++ u32 free_pos; ++ int hw_index; ++ struct { ++ u32 set; ++ u32 clr; ++ } mask; ++}; + + struct ast2600_i3c { + struct dw_i3c_master dw; + struct regmap *global_regs; + unsigned int global_idx; + unsigned int sda_pullup; ++ ++ struct ast2600_i3c_swdat_group dat_group[NUM_OF_SWDAT_GROUP]; + }; + 
++static u8 even_parity(u8 p) ++{ ++ p ^= p >> 4; ++ p &= 0xf; ++ ++ return (0x9669 >> p) & 1; ++} ++ ++static inline struct dw_i3c_master * ++to_dw_i3c_master(struct i3c_master_controller *master) ++{ ++ return container_of(master, struct dw_i3c_master, base); ++} ++ + static struct ast2600_i3c *to_ast2600_i3c(struct dw_i3c_master *dw) + { + return container_of(dw, struct ast2600_i3c, dw); +@@ -117,9 +194,537 @@ + } + } + ++static void ast2600_i3c_enter_sw_mode(struct dw_i3c_master *dw) ++{ ++ struct ast2600_i3c *i3c = to_ast2600_i3c(dw); ++ ++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx), ++ SCL_IN_SW_MODE_VAL | SDA_IN_SW_MODE_VAL, ++ SCL_IN_SW_MODE_VAL | SDA_IN_SW_MODE_VAL); ++ ++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx), ++ SCL_IN_SW_MODE_EN | SDA_IN_SW_MODE_EN, ++ SCL_IN_SW_MODE_EN | SDA_IN_SW_MODE_EN); ++} ++ ++static void ast2600_i3c_exit_sw_mode(struct dw_i3c_master *dw) ++{ ++ struct ast2600_i3c *i3c = to_ast2600_i3c(dw); ++ ++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx), ++ SCL_IN_SW_MODE_EN | SDA_IN_SW_MODE_EN, 0); ++} ++ ++static void ast2600_i3c_toggle_scl_in(struct dw_i3c_master *dw, int count) ++{ ++ struct ast2600_i3c *i3c = to_ast2600_i3c(dw); ++ ++ for (; count; count--) { ++ regmap_write_bits(i3c->global_regs, ++ AST2600_I3CG_REG1(i3c->global_idx), ++ SCL_IN_SW_MODE_VAL, 0); ++ regmap_write_bits(i3c->global_regs, ++ AST2600_I3CG_REG1(i3c->global_idx), ++ SCL_IN_SW_MODE_VAL, SCL_IN_SW_MODE_VAL); ++ } ++} ++ ++static void ast2600_i3c_gen_internal_stop(struct dw_i3c_master *dw) ++{ ++ struct ast2600_i3c *i3c = to_ast2600_i3c(dw); ++ ++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx), ++ SCL_IN_SW_MODE_VAL, 0); ++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx), ++ SDA_IN_SW_MODE_VAL, 0); ++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx), ++ SCL_IN_SW_MODE_VAL, SCL_IN_SW_MODE_VAL); ++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx), ++ SDA_IN_SW_MODE_VAL, SDA_IN_SW_MODE_VAL); ++} ++ ++static int aspeed_i3c_bus_recovery(struct dw_i3c_master *dw) ++{ ++ struct ast2600_i3c *i3c = to_ast2600_i3c(dw); ++ int i, ret = -1; ++ ++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx), ++ SCL_OUT_SW_MODE_VAL, SCL_OUT_SW_MODE_VAL); ++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx), ++ SCL_SW_MODE_OE, SCL_SW_MODE_OE); ++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx), ++ SCL_OUT_SW_MODE_EN, SCL_OUT_SW_MODE_EN); ++ ++ for (i = 0; i < 19; i++) { ++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx), ++ SCL_OUT_SW_MODE_VAL, 0); ++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx), ++ SCL_OUT_SW_MODE_VAL, SCL_OUT_SW_MODE_VAL); ++ if (readl(dw->regs + PRESENT_STATE) & SDA_LINE_SIGNAL_LEVEL) { ++ ret = 0; ++ break; ++ } ++ } ++ ++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx), ++ SCL_OUT_SW_MODE_EN, 0); ++ if (ret) ++ dev_err(&dw->base.dev, "Failed to recover the bus\n"); ++ ++ return ret; ++} ++ ++static void ast2600_i3c_gen_target_reset_pattern(struct dw_i3c_master *dw) ++{ ++ struct ast2600_i3c *i3c = to_ast2600_i3c(dw); ++ int i; ++ ++ if (dw->base.bus.context == I3C_BUS_CONTEXT_JESD403) { ++ regmap_write_bits(i3c->global_regs, ++ AST2600_I3CG_REG1(i3c->global_idx), ++ SCL_OUT_SW_MODE_VAL, SCL_OUT_SW_MODE_VAL); ++ regmap_write_bits(i3c->global_regs, ++ AST2600_I3CG_REG1(i3c->global_idx), 
++ SCL_SW_MODE_OE, SCL_SW_MODE_OE); ++ regmap_write_bits(i3c->global_regs, ++ AST2600_I3CG_REG1(i3c->global_idx), ++ SCL_OUT_SW_MODE_EN, SCL_OUT_SW_MODE_EN); ++ regmap_write_bits(i3c->global_regs, ++ AST2600_I3CG_REG1(i3c->global_idx), ++ SCL_OUT_SW_MODE_VAL, 0); ++ mdelay(DIV_ROUND_UP(dw->timing.timed_reset_scl_low_ns, ++ 1000000)); ++ regmap_write_bits(i3c->global_regs, ++ AST2600_I3CG_REG1(i3c->global_idx), ++ SCL_OUT_SW_MODE_VAL, SCL_OUT_SW_MODE_VAL); ++ regmap_write_bits(i3c->global_regs, ++ AST2600_I3CG_REG1(i3c->global_idx), ++ SCL_OUT_SW_MODE_EN, 0); ++ return; ++ } ++ ++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx), ++ SDA_OUT_SW_MODE_VAL | SCL_OUT_SW_MODE_VAL, ++ SDA_OUT_SW_MODE_VAL | SCL_OUT_SW_MODE_VAL); ++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx), ++ SDA_SW_MODE_OE | SCL_SW_MODE_OE, ++ SDA_SW_MODE_OE | SCL_SW_MODE_OE); ++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx), ++ SDA_OUT_SW_MODE_EN | SCL_OUT_SW_MODE_EN, ++ SDA_OUT_SW_MODE_EN | SCL_OUT_SW_MODE_EN); ++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx), ++ SDA_IN_SW_MODE_VAL | SCL_IN_SW_MODE_VAL, ++ SDA_IN_SW_MODE_VAL | SCL_IN_SW_MODE_VAL); ++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx), ++ SDA_IN_SW_MODE_EN | SCL_IN_SW_MODE_EN, ++ SDA_IN_SW_MODE_EN | SCL_IN_SW_MODE_EN); ++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx), ++ SCL_OUT_SW_MODE_VAL, 0); ++ for (i = 0; i < 7; i++) { ++ regmap_write_bits(i3c->global_regs, ++ AST2600_I3CG_REG1(i3c->global_idx), ++ SDA_OUT_SW_MODE_VAL, 0); ++ regmap_write_bits(i3c->global_regs, ++ AST2600_I3CG_REG1(i3c->global_idx), ++ SDA_OUT_SW_MODE_VAL, SDA_OUT_SW_MODE_VAL); ++ } ++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx), ++ SCL_OUT_SW_MODE_VAL, SCL_OUT_SW_MODE_VAL); ++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx), ++ SDA_OUT_SW_MODE_VAL, 0); ++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx), ++ SDA_OUT_SW_MODE_VAL, SDA_OUT_SW_MODE_VAL); ++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx), ++ SDA_OUT_SW_MODE_EN | SCL_OUT_SW_MODE_EN, 0); ++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx), ++ SDA_IN_SW_MODE_EN | SCL_IN_SW_MODE_EN, 0); ++} ++ ++static bool ast2600_i3c_fsm_exit_serv_ibi(struct dw_i3c_master *dw) ++{ ++ u32 state; ++ ++ /* ++ * Clear the IBI queue to enable the hardware to generate SCL and ++ * begin detecting the T-bit low to stop reading IBI data. 
++ */ ++ readl(dw->regs + IBI_QUEUE_STATUS); ++ state = FIELD_GET(CM_TFR_STS, readl(dw->regs + PRESENT_STATE)); ++ if (state == CM_TFR_STS_MASTER_SERV_IBI) ++ return false; ++ ++ return true; ++} ++ ++static void ast2600_i3c_gen_tbits_in(struct dw_i3c_master *dw) ++{ ++ struct ast2600_i3c *i3c = to_ast2600_i3c(dw); ++ bool is_idle; ++ int ret; ++ ++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx), ++ SDA_IN_SW_MODE_VAL, SDA_IN_SW_MODE_VAL); ++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx), ++ SDA_IN_SW_MODE_EN, SDA_IN_SW_MODE_EN); ++ ++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx), ++ SDA_IN_SW_MODE_VAL, 0); ++ ret = readx_poll_timeout_atomic(ast2600_i3c_fsm_exit_serv_ibi, dw, ++ is_idle, is_idle, 0, 2000000); ++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx), ++ SDA_IN_SW_MODE_EN, 0); ++ if (ret) ++ dev_err(&dw->base.dev, ++ "Failed to exit the I3C fsm from %lx(MASTER_SERV_IBI): %d", ++ FIELD_GET(CM_TFR_STS, readl(dw->regs + PRESENT_STATE)), ++ ret); ++} ++ ++static void ast2600_i3c_set_ibi_mdb(struct dw_i3c_master *dw, u8 mdb) ++{ ++ u32 reg; ++ ++ reg = readl(dw->regs + DEVICE_CTRL); ++ reg &= ~DEV_CTRL_SLAVE_MDB; ++ reg |= FIELD_PREP(DEV_CTRL_SLAVE_MDB, mdb); ++ writel(reg, dw->regs + DEVICE_CTRL); ++} ++ ++static int ast2600_i3c_get_free_hw_pos(struct dw_i3c_master *dw) ++{ ++ if (!(dw->free_pos & GENMASK(dw->maxdevs - 1, 0))) ++ return -ENOSPC; ++ ++ return ffs(dw->free_pos) - 1; ++} ++ ++static void ast2600_i3c_init_swdat(struct dw_i3c_master *dw) ++{ ++ struct ast2600_i3c *i3c = to_ast2600_i3c(dw); ++ struct ast2600_i3c_swdat_group *gp; ++ int i, j; ++ u32 def_set, def_clr; ++ ++ def_clr = DEV_ADDR_TABLE_IBI_ADDR_MASK; ++ def_set = DEV_ADDR_TABLE_MR_REJECT | DEV_ADDR_TABLE_SIR_REJECT; ++ ++ for (i = 0; i < NUM_OF_SWDAT_GROUP; i++) { ++ gp = &i3c->dat_group[i]; ++ gp->hw_index = -1; ++ gp->free_pos = ALL_DATS_IN_GROUP_ARE_FREE; ++ gp->mask.clr = def_clr; ++ gp->mask.set = def_set; ++ ++ for (j = 0; j < NUM_OF_SWDATS_IN_GROUP; j++) ++ gp->dat[j] = 0; ++ } ++ ++ for (i = 0; i < dw->maxdevs; i++) ++ writel(def_set, ++ dw->regs + DEV_ADDR_TABLE_LOC(dw->datstartaddr, i)); ++} ++ ++static int ast2600_i3c_set_swdat(struct dw_i3c_master *dw, u8 addr, u32 val) ++{ ++ struct ast2600_i3c *i3c = to_ast2600_i3c(dw); ++ struct ast2600_i3c_swdat_group *gp = &i3c->dat_group[ADDR_GRP(addr)]; ++ int pos = ADDR_HID(addr); ++ ++ if (!(val & DEV_ADDR_TABLE_LEGACY_I2C_DEV)) { ++ /* Calculate DA parity for I3C devices */ ++ val &= ~DEV_ADDR_TABLE_DA_PARITY; ++ val |= FIELD_PREP(DEV_ADDR_TABLE_DA_PARITY, even_parity(addr)); ++ } ++ gp->dat[pos] = val; ++ ++ if (val) { ++ gp->free_pos &= ~BIT(pos); ++ ++ /* ++ * reserve the hw dat resource for the first member of the ++ * group. all the members in the group share the same hw dat. ++ */ ++ if (gp->hw_index == -1) { ++ gp->hw_index = ast2600_i3c_get_free_hw_pos(dw); ++ if (gp->hw_index < 0) ++ goto out; ++ ++ dw->free_pos &= ~BIT(gp->hw_index); ++ val &= ~gp->mask.clr; ++ val |= gp->mask.set; ++ writel(val, ++ dw->regs + DEV_ADDR_TABLE_LOC(dw->datstartaddr, ++ gp->hw_index)); ++ } ++ } else { ++ gp->free_pos |= BIT(pos); ++ ++ /* ++ * release the hw dat resource if all the members in the group ++ * are free. 
++ */ ++ if (gp->free_pos == ALL_DATS_IN_GROUP_ARE_FREE) { ++ writel(gp->mask.set, ++ dw->regs + DEV_ADDR_TABLE_LOC(dw->datstartaddr, ++ gp->hw_index)); ++ dw->free_pos |= BIT(gp->hw_index); ++ gp->hw_index = -1; ++ } ++ } ++out: ++ return gp->hw_index; ++} ++ ++static u32 ast2600_i3c_get_swdat(struct dw_i3c_master *dw, u8 addr) ++{ ++ struct ast2600_i3c *i3c = to_ast2600_i3c(dw); ++ struct ast2600_i3c_swdat_group *gp = &i3c->dat_group[ADDR_GRP(addr)]; ++ ++ return gp->dat[ADDR_HID(addr)]; ++} ++ ++static int ast2600_i3c_flush_swdat(struct dw_i3c_master *dw, u8 addr) ++{ ++ struct ast2600_i3c *i3c = to_ast2600_i3c(dw); ++ struct ast2600_i3c_swdat_group *gp = &i3c->dat_group[ADDR_GRP(addr)]; ++ u32 dat = gp->dat[ADDR_HID(addr)]; ++ int hw_index = gp->hw_index; ++ ++ if (!dat || hw_index < 0) ++ return -1; ++ ++ dat &= ~gp->mask.clr; ++ dat |= gp->mask.set; ++ writel(dat, dw->regs + DEV_ADDR_TABLE_LOC(dw->datstartaddr, hw_index)); ++ ++ return 0; ++} ++ ++static int ast2600_i3c_get_swdat_hw_pos(struct dw_i3c_master *dw, u8 addr) ++{ ++ struct ast2600_i3c *i3c = to_ast2600_i3c(dw); ++ struct ast2600_i3c_swdat_group *gp = &i3c->dat_group[ADDR_GRP(addr)]; ++ ++ return gp->hw_index; ++} ++ ++static int ast2600_i3c_reattach_i3c_dev(struct i3c_dev_desc *dev, ++ u8 old_dyn_addr) ++{ ++ struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev); ++ struct i3c_master_controller *m = i3c_dev_get_master(dev); ++ struct dw_i3c_master *master = to_dw_i3c_master(m); ++ u32 dat = FIELD_PREP(DEV_ADDR_TABLE_DYNAMIC_ADDR, dev->info.dyn_addr); ++ ++ if (old_dyn_addr != dev->info.dyn_addr) ++ ast2600_i3c_set_swdat(master, old_dyn_addr, 0); ++ ++ ast2600_i3c_set_swdat(master, dev->info.dyn_addr, dat); ++ data->index = ast2600_i3c_get_swdat_hw_pos(master, dev->info.dyn_addr); ++ master->devs[dev->info.dyn_addr].addr = dev->info.dyn_addr; ++ ++ return 0; ++} ++ ++static int ast2600_i3c_attach_i3c_dev(struct i3c_dev_desc *dev) ++{ ++ struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev); ++ struct i3c_master_controller *m = i3c_dev_get_master(dev); ++ struct dw_i3c_master *master = to_dw_i3c_master(m); ++ int pos; ++ u8 addr = dev->info.dyn_addr ?: dev->info.static_addr; ++ ++ pos = ast2600_i3c_set_swdat(master, addr, ++ FIELD_PREP(DEV_ADDR_TABLE_DYNAMIC_ADDR, addr)); ++ if (pos < 0) ++ return pos; ++ ++ data = kzalloc(sizeof(*data), GFP_KERNEL); ++ if (!data) ++ return -ENOMEM; ++ ++ data->index = ast2600_i3c_get_swdat_hw_pos(master, addr); ++ master->devs[addr].addr = addr; ++ i3c_dev_set_master_data(dev, data); ++ ++ if (master->base.bus.context == I3C_BUS_CONTEXT_JESD403) { ++ dev->info.max_write_ds = 0; ++ dev->info.max_read_ds = 0; ++ } ++ ++ return 0; ++} ++ ++static void ast2600_i3c_detach_i3c_dev(struct i3c_dev_desc *dev) ++{ ++ struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev); ++ struct i3c_master_controller *m = i3c_dev_get_master(dev); ++ struct dw_i3c_master *master = to_dw_i3c_master(m); ++ u8 addr = dev->info.dyn_addr ?: dev->info.static_addr; ++ ++ ast2600_i3c_set_swdat(master, addr, 0); ++ ++ i3c_dev_set_master_data(dev, NULL); ++ master->devs[addr].addr = 0; ++ kfree(data); ++} ++ ++static int ast2600_i3c_attach_i2c_dev(struct i2c_dev_desc *dev) ++{ ++ struct dw_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev); ++ struct i3c_master_controller *m = i2c_dev_get_master(dev); ++ struct dw_i3c_master *master = to_dw_i3c_master(m); ++ int pos; ++ ++ pos = ast2600_i3c_set_swdat(master, dev->addr, ++ DEV_ADDR_TABLE_LEGACY_I2C_DEV | ++ 
FIELD_PREP(DEV_ADDR_TABLE_STATIC_ADDR, dev->addr)); ++ if (pos < 0) ++ return pos; ++ ++ data = kzalloc(sizeof(*data), GFP_KERNEL); ++ if (!data) ++ return -ENOMEM; ++ ++ data->index = ast2600_i3c_get_swdat_hw_pos(master, dev->addr); ++ master->devs[dev->addr].addr = dev->addr; ++ i2c_dev_set_master_data(dev, data); ++ ++ return 0; ++} ++ ++static void ast2600_i3c_detach_i2c_dev(struct i2c_dev_desc *dev) ++{ ++ struct dw_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev); ++ struct i3c_master_controller *m = i2c_dev_get_master(dev); ++ struct dw_i3c_master *master = to_dw_i3c_master(m); ++ ++ ast2600_i3c_set_swdat(master, dev->addr, 0); ++ ++ i2c_dev_set_master_data(dev, NULL); ++ master->devs[dev->addr].addr = 0; ++ kfree(data); ++} ++ ++static void ast2600_i3c_set_sir_enabled(struct dw_i3c_master *dw, ++ struct i3c_dev_desc *dev, u8 idx, ++ bool enable) ++{ ++ struct ast2600_i3c *i3c = to_ast2600_i3c(dw); ++ struct ast2600_i3c_swdat_group *gp = ++ &i3c->dat_group[ADDR_GRP(dev->info.dyn_addr)]; ++ unsigned long flags; ++ u32 reg; ++ bool global; ++ ++ spin_lock_irqsave(&dw->devs_lock, flags); ++ if (enable) { ++ gp->mask.clr |= DEV_ADDR_TABLE_SIR_REJECT | ++ DEV_ADDR_TABLE_IBI_ADDR_MASK; ++ ++ gp->mask.set &= ~DEV_ADDR_TABLE_SIR_REJECT; ++ gp->mask.set |= FIELD_PREP(DEV_ADDR_TABLE_IBI_ADDR_MASK, ++ IBI_ADDR_MASK_LAST_3BITS); ++ /* ++ * The ast2600 i3c controller will lock up on receiving 4n+1-byte IBIs ++ * if the PEC is disabled. We have no way to restrict the length of ++ * IBIs sent to the controller, so we need to unconditionally enable ++ * PEC checking, which means we drop a byte of payload data ++ */ ++ gp->mask.set |= DEV_ADDR_TABLE_IBI_PEC; ++ if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD) ++ gp->mask.set |= DEV_ADDR_TABLE_IBI_MDB; ++ } else { ++ reg = ast2600_i3c_get_swdat(dw, dev->info.dyn_addr); ++ reg |= DEV_ADDR_TABLE_SIR_REJECT; ++ ast2600_i3c_set_swdat(dw, dev->info.dyn_addr, reg); ++ } ++ ++ reg = readl(dw->regs + IBI_SIR_REQ_REJECT); ++ if (enable) { ++ global = reg == 0xffffffff; ++ reg &= ~BIT(gp->hw_index); ++ } else { ++ int i; ++ bool hj_rejected = !!(readl(dw->regs + DEVICE_CTRL) & ++ DEV_CTRL_HOT_JOIN_NACK); ++ bool ibi_enable = false; ++ ++ for (i = 0; i < NUM_OF_SWDATS_IN_GROUP; i++) { ++ if (!(gp->dat[i] & DEV_ADDR_TABLE_SIR_REJECT)) { ++ ibi_enable = true; ++ break; ++ } ++ } ++ ++ if (!ibi_enable) { ++ reg |= BIT(gp->hw_index); ++ global = (reg == 0xffffffff) && hj_rejected; ++ ++ gp->mask.set = DEV_ADDR_TABLE_SIR_REJECT; ++ } ++ } ++ writel(reg, dw->regs + IBI_SIR_REQ_REJECT); ++ ++ if (global) { ++ reg = readl(dw->regs + INTR_STATUS_EN); ++ reg &= ~INTR_IBI_THLD_STAT; ++ if (enable) ++ reg |= INTR_IBI_THLD_STAT; ++ writel(reg, dw->regs + INTR_STATUS_EN); ++ ++ reg = readl(dw->regs + INTR_SIGNAL_EN); ++ reg &= ~INTR_IBI_THLD_STAT; ++ if (enable) ++ reg |= INTR_IBI_THLD_STAT; ++ writel(reg, dw->regs + INTR_SIGNAL_EN); ++ } ++ ++ ast2600_i3c_flush_swdat(dw, dev->info.dyn_addr); ++ ++ spin_unlock_irqrestore(&dw->devs_lock, flags); ++} ++ ++static void ast2600_i3c_set_ibi_dev(struct dw_i3c_master *dw, ++ struct i3c_dev_desc *dev) ++{ ++ dw->devs[dev->info.dyn_addr].ibi_dev = dev; ++} ++ ++static void ast2600_i3c_unset_ibi_dev(struct dw_i3c_master *dw, ++ struct i3c_dev_desc *dev) ++{ ++ dw->devs[dev->info.dyn_addr].ibi_dev = NULL; ++} ++ ++static struct i3c_dev_desc *ast2600_i3c_get_ibi_dev(struct dw_i3c_master *dw, ++ u8 addr) ++{ ++ return dw->devs[addr].ibi_dev; ++} ++ + static const struct dw_i3c_platform_ops ast2600_i3c_ops = { + .init = ast2600_i3c_init, + 
.set_dat_ibi = ast2600_i3c_set_dat_ibi, ++ .enter_sw_mode = ast2600_i3c_enter_sw_mode, ++ .exit_sw_mode = ast2600_i3c_exit_sw_mode, ++ .toggle_scl_in = ast2600_i3c_toggle_scl_in, ++ .gen_internal_stop = ast2600_i3c_gen_internal_stop, ++ .gen_target_reset_pattern = ast2600_i3c_gen_target_reset_pattern, ++ .gen_tbits_in = ast2600_i3c_gen_tbits_in, ++ .bus_recovery = aspeed_i3c_bus_recovery, ++ .set_ibi_mdb = ast2600_i3c_set_ibi_mdb, ++ .reattach_i3c_dev = ast2600_i3c_reattach_i3c_dev, ++ .attach_i3c_dev = ast2600_i3c_attach_i3c_dev, ++ .detach_i3c_dev = ast2600_i3c_detach_i3c_dev, ++ .attach_i2c_dev = ast2600_i3c_attach_i2c_dev, ++ .detach_i2c_dev = ast2600_i3c_detach_i2c_dev, ++ .get_addr_pos = ast2600_i3c_get_swdat_hw_pos, ++ .flush_dat = ast2600_i3c_flush_swdat, ++ .set_sir_enabled = ast2600_i3c_set_sir_enabled, ++ .set_ibi_dev = ast2600_i3c_set_ibi_dev, ++ .unset_ibi_dev = ast2600_i3c_unset_ibi_dev, ++ .get_ibi_dev = ast2600_i3c_get_ibi_dev, + }; + + static int ast2600_i3c_probe(struct platform_device *pdev) +@@ -156,6 +761,10 @@ + i3c->sda_pullup); + + i3c->dw.platform_ops = &ast2600_i3c_ops; ++ i3c->dw.base.pec_supported = true; ++ ++ ast2600_i3c_init_swdat(&i3c->dw); ++ + return dw_i3c_common_probe(&i3c->dw, pdev); + } + +diff --git a/drivers/i3c/master/mipi-i3c-hci/vendor_aspeed.h b/drivers/i3c/master/mipi-i3c-hci/vendor_aspeed.h +--- a/drivers/i3c/master/mipi-i3c-hci/vendor_aspeed.h 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/i3c/master/mipi-i3c-hci/vendor_aspeed.h 2025-12-23 10:16:21.082033373 +0000 +@@ -0,0 +1,413 @@ ++/* SPDX-License-Identifier: GPL-2.0+ */ ++#ifndef VENDOR_ASPEED_H ++#define VENDOR_ASPEED_H ++ ++/* Aspeed in-house register */ ++#include "linux/bitfield.h" ++#define ast_inhouse_read(r) readl(hci->INHOUSE_regs + (r)) ++#define ast_inhouse_write(r, v) writel(v, hci->INHOUSE_regs + (r)) ++ ++#define ASPEED_I3C_CTRL 0x0 ++#define ASPEED_I3C_CTRL_STOP_QUEUE_PT BIT(31) //Stop the queue read pointer. 
++#define ASPEED_I3C_CTRL_INIT BIT(4) ++#define ASPEED_I3C_CTRL_INIT_MODE GENMASK(1, 0) ++#define INIT_MST_MODE 0 ++#define INIT_SEC_MST_MODE 1 ++#define INIT_SLV_MODE 2 ++ ++#define ASPEED_I3C_STS 0x4 ++#define ASPEED_I3C_STS_SLV_DYNAMIC_ADDRESS_VALID BIT(23) ++#define ASPEED_I3C_STS_SLV_DYNAMIC_ADDRESS GENMASK(22, 16) ++#define ASPEED_I3C_STS_MODE_PURE_SLV BIT(8) ++#define ASPEED_I3C_STS_MODE_SECONDARY_SLV_TO_MST BIT(7) ++#define ASPEED_I3C_STS_MODE_SECONDARY_MST_TO_SLV BIT(6) ++#define ASPEED_I3C_STS_MODE_SECONDARY_SLV BIT(5) ++#define ASPEED_I3C_STS_MODE_SECONDARY_MST BIT(4) ++#define ASPEED_I3C_STS_MODE_PRIMARY_SLV_TO_MST BIT(3) ++#define ASPEED_I3C_STS_MODE_PRIMARY_MST_TO_SLV BIT(2) ++#define ASPEED_I3C_STS_MODE_PRIMARY_SLV BIT(1) ++#define ASPEED_I3C_STS_MODE_PRIMARY_MST BIT(0) ++ ++#define ASPEED_I3C_DAA_INDEX0 0x10 ++#define ASPEED_I3C_DAA_INDEX1 0x14 ++#define ASPEED_I3C_DAA_INDEX2 0x18 ++#define ASPEED_I3C_DAA_INDEX3 0x1C ++ ++#define ASPEED_I3C_AUTOCMD_0 0x20 ++#define ASPEED_I3C_AUTOCMD_1 0x24 ++#define ASPEED_I3C_AUTOCMD_2 0x28 ++#define ASPEED_I3C_AUTOCMD_3 0x2C ++#define ASPEED_I3C_AUTOCMD_4 0x30 ++#define ASPEED_I3C_AUTOCMD_5 0x34 ++#define ASPEED_I3C_AUTOCMD_6 0x38 ++#define ASPEED_I3C_AUTOCMD_7 0x3C ++ ++#define ASPEED_I3C_AUTOCMD_SEL_0_7 0x40 ++#define ASPEED_I3C_AUTOCMD_SEL_8_15 0x44 ++#define ASPEED_I3C_AUTOCMD_SEL_16_23 0x48 ++#define ASPEED_I3C_AUTOCMD_SEL_24_31 0x4C ++#define ASPEED_I3C_AUTOCMD_SEL_32_39 0x50 ++#define ASPEED_I3C_AUTOCMD_SEL_40_47 0x54 ++#define ASPEED_I3C_AUTOCMD_SEL_48_55 0x58 ++#define ASPEED_I3C_AUTOCMD_SEL_56_63 0x5C ++#define ASPEED_I3C_AUTOCMD_SEL_64_71 0x60 ++#define ASPEED_I3C_AUTOCMD_SEL_72_79 0x64 ++#define ASPEED_I3C_AUTOCMD_SEL_80_87 0x68 ++#define ASPEED_I3C_AUTOCMD_SEL_88_95 0x6C ++#define ASPEED_I3C_AUTOCMD_SEL_96_103 0x70 ++#define ASPEED_I3C_AUTOCMD_SEL_104_111 0x74 ++#define ASPEED_I3C_AUTOCMD_SEL_112_119 0x78 ++#define ASPEED_I3C_AUTOCMD_SEL_120_127 0x7C ++ ++#define ASPEED_I3C_SLV_CHAR_CTRL 0xA0 ++#define ASPEED_I3C_SLV_CHAR_CTRL_DCR GENMASK(23, 16) ++#define ASPEED_I3C_SLV_CHAR_CTRL_BCR GENMASK(15, 8) ++#define SLV_BCR_DEVICE_ROLE GENMASK(7, 6) ++#define ASPEED_I3C_SLV_CHAR_CTRL_STATIC_ADDR_EN BIT(7) ++#define ASPEED_I3C_SLV_CHAR_CTRL_STATIC_ADDR GENMASK(6, 0) ++#define SLV_PID_HI(x) (((x) >> 32) & GENMASK(15, 0)) ++#define SLV_PID_LO(x) ((x) & GENMASK(31, 0)) ++#define ASPEED_I3C_SLV_PID_LO 0xA4 ++#define ASPEED_I3C_SLV_PID_HI 0xA8 ++#define ASPEED_I3C_SLV_FSM 0xAC ++#define ASPEED_I3C_SLV_CAP_CTRL 0xB0 ++#define ASPEED_I3C_SLV_CAP_CTRL_PEC_EN BIT(31) ++#define ASPEED_I3C_SLV_CAP_CTRL_HAIT_IF_IBI_ERR BIT(30) ++#define ASPEED_I3C_SLV_CAP_CTRL_ACCEPT_CR BIT(16) ++#define ASPEED_I3C_SLV_CAP_CTRL_HJ_REQ BIT(10) ++#define ASPEED_I3C_SLV_CAP_CTRL_MR_REQ BIT(9) ++#define ASPEED_I3C_SLV_CAP_CTRL_IBI_REQ BIT(8) ++#define ASPEED_I3C_SLV_CAP_CTRL_HJ_WAIT BIT(6) ++#define ASPEED_I3C_SLV_CAP_CTRL_MR_WAIT BIT(5) ++#define ASPEED_I3C_SLV_CAP_CTRL_IBI_WAIT BIT(4) ++#define ASPEED_I3C_SLV_CAP_CTRL_NOTSUP_DEF_BYTE BIT(1) ++#define ASPEED_I3C_SLV_CAP_CTRL_I2C_DEV BIT(0) ++/* CCC related registers */ ++#define ASPEED_I3C_SLV_STS1 0xB4 ++#define ASPEED_I3C_SLV_STS1_IBI_PAYLOAD_SIZE GENMASK(31, 24) ++#define ASPEED_I3C_SLV_STS1_RSTACT GENMASK(22, 16) ++/* the parameters for the HDR-DDR Data Transfer Early Termination procedure*/ ++#define ASPEED_I3C_SLV_STS1_ETP_ACK_CAP BIT(15) ++#define ASPEED_I3C_SLV_STS1_ETP_W_REQ BIT(14) ++#define ASPEED_I3C_SLV_STS1_ETP_CRC GENMASK(13, 12) ++#define ASPEED_I3C_SLV_STS1_ENDXFER_CONFIRM BIT(11) ++#define 
ASPEED_I3C_SLV_STS1_ENTER_TEST_MDOE BIT(8) ++#define ASPEED_I3C_SLV_STS1_HJ_EN BIT(6) ++#define ASPEED_I3C_SLV_STS1_CR_EN BIT(5) ++#define ASPEED_I3C_SLV_STS1_IBI_EN BIT(4) ++#define ASPEED_I3C_SLV_STS1_HJ_DONE BIT(2) ++#define ASPEED_I3C_SLV_STS1_CR_DONE BIT(1) ++#define ASPEED_I3C_SLV_STS1_IBI_DONE BIT(0) ++#define ASPEED_I3C_SLV_STS2 0xB8 ++#define ASPEED_I3C_SLV_STS2_MWL GENMASK(31, 16) ++#define ASPEED_I3C_SLV_STS2_MRL GENMASK(15, 0) ++#define ASPEED_I3C_SLV_STS3_GROUP_ADDR 0xBC ++#define ASPEED_I3C_SLV_STS3_GROUP3_VALID BIT(31) ++#define ASPEED_I3C_SLV_STS3_GROUP3_ADDR GENMASK(30, 24) ++#define ASPEED_I3C_SLV_STS3_GROUP2_VALID BIT(23) ++#define ASPEED_I3C_SLV_STS3_GROUP2_ADDR GENMASK(22, 16) ++#define ASPEED_I3C_SLV_STS3_GROUP1_VALID BIT(15) ++#define ASPEED_I3C_SLV_STS3_GROUP1_ADDR GENMASK(14, 8) ++#define ASPEED_I3C_SLV_STS3_GROUP0_VALID BIT(7) ++#define ASPEED_I3C_SLV_STS3_GROUP0_ADDR GENMASK(6, 0) ++#define ASPEED_I3C_SLV_STS4_RSTACT_TIME 0xC0 ++#define ASPEED_I3C_SLV_STS4_DBG_NET GENMASK(23, 16) ++#define ASPEED_I3C_SLV_STS4_WHOLE_CHIP GENMASK(15, 8) ++#define ASPEED_I3C_SLV_STS4_I3C GENMASK(7, 0) ++#define ASPEED_I3C_SLV_STS5_GETMXDS_RW 0xC4 ++#define ASPEED_I3C_SLV_STS5_MAXWR GENMASK(15, 8) ++#define ASPEED_I3C_SLV_STS5_MAXRD GENMASK(7, 0) ++#define ASPEED_I3C_SLV_STS6_GETMXDS 0xC8 ++#define ASPEED_I3C_SLV_STS6_FORMAT BIT(24) ++#define ASPEED_I3C_SLV_STS6_MAXRD_TURN_H GENMASK(23, 16) ++#define ASPEED_I3C_SLV_STS6_MAXRD_TURN_M GENMASK(15, 8) ++#define ASPEED_I3C_SLV_STS6_MAXRD_TURN_L GENMASK(7, 0) ++#define ASPEED_I3C_SLV_STS7_GETSTATUS 0xCC ++#define ASPEED_I3C_SLV_STS7_PRECR GENMASK(31, 16) ++#define ASPEED_I3C_SLV_STS7_TGT GENMASK(15, 0) ++#define ASPEED_I3C_SLV_STS8_GETCAPS_TGT 0xD0 ++#define ASPEED_I3C_SLV_STS9_GETCAPS_VT_CR 0xD4 ++#define ASPEED_I3C_SLV_STS7_VT GENMASK(31, 16) ++#define ASPEED_I3C_SLV_STS7_CR GENMASK(15, 0) ++ ++#define ASPEED_I3C_QUEUE_PTR0 0xD8 ++#define QUEUE_PTR0_TX_R(q) FIELD_GET(GENMASK(24, 20), q) ++#define QUEUE_PTR0_TX_W(q) FIELD_GET(GENMASK(16, 12), q) ++#define QUEUE_PTR0_IBI_R(q) FIELD_GET(GENMASK(11, 10), q) ++#define QUEUE_PTR0_IBI_W(q) FIELD_GET(GENMASK(9, 8), q) ++#define QUEUE_PTR0_RESP_R(q) FIELD_GET(GENMASK(7, 6), q) ++#define QUEUE_PTR0_RESP_W(q) FIELD_GET(GENMASK(5, 4), q) ++#define QUEUE_PTR0_CMD_R(q) FIELD_GET(GENMASK(3, 2), q) ++#define QUEUE_PTR0_CMD_W(q) FIELD_GET(GENMASK(1, 0), q) ++ ++#define ASPEED_I3C_QUEUE_PTR1 0xDC ++#define QUEUE_PTR1_IBI_DATA_R(q) FIELD_GET(GENMASK(28, 24), q) ++#define QUEUE_PTR1_IBI_DATA_W(q) FIELD_GET(GENMASK(20, 16), q) ++#define QUEUE_PTR1_RX_R(q) FIELD_GET(GENMASK(12, 8), q) ++#define QUEUE_PTR1_RX_W(q) FIELD_GET(GENMASK(4, 0), q) ++ ++#define ASPEED_I3C_INTR_STATUS 0xE0 ++#define ASPEED_I3C_INTR_STATUS_ENABLE 0xE4 ++#define ASPEED_I3C_INTR_SIGNAL_ENABLE 0xE8 ++#define ASPEED_I3C_INTR_FORCE 0xEC ++#define ASPEED_I3C_INTR_I2C_SDA_STUCK_LOW BIT(14) ++#define ASPEED_I3C_INTR_I3C_SDA_STUCK_HIGH BIT(13) ++#define ASPEED_I3C_INTR_I3C_SDA_STUCK_LOW BIT(12) ++#define ASPEED_I3C_INTR_MST_INTERNAL_DONE BIT(10) ++#define ASPEED_I3C_INTR_MST_DDR_READ_DONE BIT(9) ++#define ASPEED_I3C_INTR_MST_DDR_WRITE_DONE BIT(8) ++#define ASPEED_I3C_INTR_MST_IBI_DONE BIT(7) ++#define ASPEED_I3C_INTR_MST_READ_DONE BIT(6) ++#define ASPEED_I3C_INTR_MST_WRITE_DONE BIT(5) ++#define ASPEED_I3C_INTR_MST_DAA_DONE BIT(4) ++#define ASPEED_I3C_INTR_SLV_SCL_STUCK BIT(1) ++#define ASPEED_I3C_INTR_TGRST BIT(0) ++ ++#define ASPEED_I3C_INTR_SUM_STATUS 0xF0 ++#define ASPEED_INTR_SUM_INHOUSE BIT(3) ++#define ASPEED_INTR_SUM_RHS BIT(2) ++#define 
ASPEED_INTR_SUM_PIO BIT(1) ++#define ASPEED_INTR_SUM_CAP BIT(0) ++ ++#define ASPEED_I3C_INTR_RENEW 0xF4 ++ ++/* Aspeed Phy register */ ++#define ast_phy_read(r) readl(hci->PHY_regs + (r)) ++#define ast_phy_write(r, v) writel(v, hci->PHY_regs + (r)) ++ ++#define PHY_SW_FORCE_CTRL 0x4 ++#define PHY_SW_FORCE_CTRL_SCL_IN_EN BIT(31) ++#define PHY_SW_FORCE_CTRL_SCL_OUT_EN BIT(30) ++#define PHY_SW_FORCE_CTRL_SCL_OE_EN BIT(29) ++#define PHY_SW_FORCE_CTRL_SCL_PU_EN BIT(28) ++#define PHY_SW_FORCE_CTRL_SDA_IN_EN BIT(27) ++#define PHY_SW_FORCE_CTRL_SDA_OUT_EN BIT(26) ++#define PHY_SW_FORCE_CTRL_SDA_OE_EN BIT(25) ++#define PHY_SW_FORCE_CTRL_SDA_PU_EN BIT(24) ++#define PHY_SW_FORCE_CTRL_SCL_IN_VAL BIT(13) ++#define PHY_SW_FORCE_CTRL_SCL_OUT_VAL BIT(12) ++#define PHY_SW_FORCE_CTRL_SCL_OE_VAL BIT(11) ++#define PHY_SW_FORCE_CTRL_SCL_PU_VAL GENMASK(10, 8) ++#define PHY_SW_FORCE_CTRL_SDA_IN_VAL BIT(5) ++#define PHY_SW_FORCE_CTRL_SDA_OUT_VAL BIT(4) ++#define PHY_SW_FORCE_CTRL_SDA_OE_VAL BIT(3) ++#define PHY_SW_FORCE_CTRL_SDA_PU_VAL GENMASK(2, 0) ++ ++/* I2C FM: 400K */ ++#define PHY_I2C_FM_CTRL0 0x8 ++#define PHY_I2C_FM_CTRL0_CAS GENMASK(25, 16) ++#define PHY_I2C_FM_CTRL0_SU_STO GENMASK(9, 0) ++#define PHY_I2C_FM_CTRL1 0xC ++#define PHY_I2C_FM_CTRL1_SCL_H GENMASK(25, 16) ++#define PHY_I2C_FM_CTRL1_SCL_L GENMASK(9, 0) ++#define PHY_I2C_FM_CTRL2 0x10 ++#define PHY_I2C_FM_CTRL2_ACK_H GENMASK(25, 16) ++#define PHY_I2C_FM_CTRL2_ACK_L GENMASK(9, 0) ++#define PHY_I2C_FM_CTRL3 0x14 ++#define PHY_I2C_FM_CTRL3_HD_DAT GENMASK(25, 16) ++#define PHY_I2C_FM_CTRL3_AHD_DAT GENMASK(9, 0) ++ ++#define PHY_I2C_FM_DEFAULT_CAS_NS 1130 ++#define PHY_I2C_FM_DEFAULT_SU_STO_NS 1370 ++#define PHY_I2C_FM_DEFAULT_SCL_H_NS 1130 ++#define PHY_I2C_FM_DEFAULT_SCL_L_NS 1370 ++#define PHY_I2C_FM_DEFAULT_HD_DAT 10 ++#define PHY_I2C_FM_DEFAULT_AHD_DAT 10 ++ ++/* I2C FMP: 1M */ ++#define PHY_I2C_FMP_CTRL0 0x18 ++#define PHY_I2C_FMP_CTRL0_CAS GENMASK(25, 16) ++#define PHY_I2C_FMP_CTRL0_SU_STO GENMASK(9, 0) ++#define PHY_I2C_FMP_CTRL1 0x1C ++#define PHY_I2C_FMP_CTRL1_SCL_H GENMASK(25, 16) ++#define PHY_I2C_FMP_CTRL1_SCL_L GENMASK(9, 0) ++#define PHY_I2C_FMP_CTRL2 0x20 ++#define PHY_I2C_FMP_CTRL2_ACK_H GENMASK(25, 16) ++#define PHY_I2C_FMP_CTRL2_ACK_L GENMASK(9, 0) ++#define PHY_I2C_FMP_CTRL3 0x24 ++#define PHY_I2C_FMP_CTRL3_HD_DAT GENMASK(25, 16) ++#define PHY_I2C_FMP_CTRL3_AHD_DAT GENMASK(9, 0) ++ ++#define PHY_I2C_FMP_DEFAULT_CAS_NS 380 ++#define PHY_I2C_FMP_DEFAULT_SU_STO_NS 620 ++#define PHY_I2C_FMP_DEFAULT_SCL_H_NS 380 ++#define PHY_I2C_FMP_DEFAULT_SCL_L_NS 620 ++#define PHY_I2C_FMP_DEFAULT_HD_DAT 10 ++#define PHY_I2C_FMP_DEFAULT_AHD_DAT 10 ++ ++/* I3C OD */ ++#define PHY_I3C_OD_CTRL0 0x28 ++#define PHY_I3C_OD_CTRL0_CAS GENMASK(25, 16) ++#define PHY_I3C_OD_CTRL0_CBP GENMASK(9, 0) ++#define PHY_I3C_OD_CTRL1 0x2C ++#define PHY_I3C_OD_CTRL1_SCL_H GENMASK(25, 16) ++#define PHY_I3C_OD_CTRL1_SCL_L GENMASK(9, 0) ++#define PHY_I3C_OD_CTRL2 0x30 ++#define PHY_I3C_OD_CTRL2_ACK_H GENMASK(25, 16) ++#define PHY_I3C_OD_CTRL2_ACK_L GENMASK(9, 0) ++#define PHY_I3C_OD_CTRL3 0x34 ++#define PHY_I3C_OD_CTRL3_HD_DAT GENMASK(25, 16) ++#define PHY_I3C_OD_CTRL3_AHD_DAT GENMASK(9, 0) ++ ++#define PHY_I3C_OD_DEFAULT_CAS_NS 40 ++#define PHY_I3C_OD_DEFAULT_CBP_NS 40 ++#define PHY_I3C_OD_DEFAULT_SCL_H_NS 380 ++#define PHY_I3C_OD_DEFAULT_SCL_L_NS 620 ++#define PHY_I3C_OD_DEFAULT_HD_DAT 10 ++#define PHY_I3C_OD_DEFAULT_AHD_DAT 10 ++ ++/* I3C PP SDR0 */ ++#define PHY_I3C_SDR0_CTRL0 0x38 ++#define PHY_I3C_SDR0_CTRL0_SCL_H GENMASK(25, 16) ++#define PHY_I3C_SDR0_CTRL0_SCL_L 
GENMASK(9, 0) ++#define PHY_I3C_SDR0_CTRL1 0x3C ++#define PHY_I3C_SDR0_CTRL1_TBIT_H GENMASK(25, 16) ++#define PHY_I3C_SDR0_CTRL1_TBIT_L GENMASK(9, 0) ++#define PHY_I3C_SDR0_CTRL2 0x40 ++#define PHY_I3C_SDR0_CTRL2_HD_PP GENMASK(25, 16) ++#define PHY_I3C_SDR0_CTRL2_TBIT_HD_PP GENMASK(9, 0) ++ ++/* 1MHz */ ++#define PHY_I3C_SDR0_DEFAULT_SCL_H_NS 380 ++#define PHY_I3C_SDR0_DEFAULT_SCL_L_NS 620 ++#define PHY_I3C_SDR0_DEFAULT_TBIT_H_NS 380 ++#define PHY_I3C_SDR0_DEFAULT_TBIT_L_NS 620 ++#define PHY_I3C_SDR0_DEFAULT_HD_PP_NS 10 ++#define PHY_I3C_SDR0_DEFAULT_TBIT_HD_PP_NS 10 ++ ++#define PHY_I3C_CTRL0_OFFSET 0x0 ++#define PHY_I3C_CTRL1_OFFSET 0x4 ++#define PHY_I3C_CTRL2_OFFSET 0x8 ++/* I3C PP SDR1 */ ++#define PHY_I3C_SDR1_CTRL0 0x44 ++#define PHY_I3C_SDR1_CTRL0_SCL_H GENMASK(25, 16) ++#define PHY_I3C_SDR1_CTRL0_SCL_L GENMASK(9, 0) ++#define PHY_I3C_SDR1_CTRL1 0x48 ++#define PHY_I3C_SDR1_CTRL1_TBIT_H GENMASK(25, 16) ++#define PHY_I3C_SDR1_CTRL1_TBIT_L GENMASK(9, 0) ++#define PHY_I3C_SDR1_CTRL2 0x4C ++#define PHY_I3C_SDR1_CTRL2_HD_PP GENMASK(25, 16) ++#define PHY_I3C_SDR1_CTRL2_TBIT_HD_PP GENMASK(9, 0) ++/* I3C PP SDR2 */ ++#define PHY_I3C_SDR2_CTRL0 0x50 ++#define PHY_I3C_SDR2_CTRL0_SCL_H GENMASK(25, 16) ++#define PHY_I3C_SDR2_CTRL0_SCL_L GENMASK(9, 0) ++#define PHY_I3C_SDR2_CTRL1 0x54 ++#define PHY_I3C_SDR2_CTRL1_TBIT_H GENMASK(25, 16) ++#define PHY_I3C_SDR2_CTRL1_TBIT_L GENMASK(9, 0) ++#define PHY_I3C_SDR2_CTRL2 0x58 ++#define PHY_I3C_SDR2_CTRL2_HD_PP GENMASK(25, 16) ++#define PHY_I3C_SDR2_CTRL2_TBIT_HD_PP GENMASK(9, 0) ++/* I3C PP SDR3 */ ++#define PHY_I3C_SDR3_CTRL0 0x5C ++#define PHY_I3C_SDR3_CTRL0_SCL_H GENMASK(25, 16) ++#define PHY_I3C_SDR3_CTRL0_SCL_L GENMASK(9, 0) ++#define PHY_I3C_SDR3_CTRL1 0x60 ++#define PHY_I3C_SDR3_CTRL1_TBIT_H GENMASK(25, 16) ++#define PHY_I3C_SDR3_CTRL1_TBIT_L GENMASK(9, 0) ++#define PHY_I3C_SDR3_CTRL2 0x64 ++#define PHY_I3C_SDR3_CTRL2_HD_PP GENMASK(25, 16) ++#define PHY_I3C_SDR3_CTRL2_TBIT_HD_PP GENMASK(9, 0) ++/* I3C PP SDR4 */ ++#define PHY_I3C_SDR4_CTRL0 0x68 ++#define PHY_I3C_SDR4_CTRL0_SCL_H GENMASK(25, 16) ++#define PHY_I3C_SDR4_CTRL0_SCL_L GENMASK(9, 0) ++#define PHY_I3C_SDR4_CTRL1 0x6C ++#define PHY_I3C_SDR4_CTRL1_TBIT_H GENMASK(25, 16) ++#define PHY_I3C_SDR4_CTRL1_TBIT_L GENMASK(9, 0) ++#define PHY_I3C_SDR4_CTRL2 0x70 ++#define PHY_I3C_SDR4_CTRL2_HD_PP GENMASK(25, 16) ++#define PHY_I3C_SDR4_CTRL2_TBIT_HD_PP GENMASK(9, 0) ++/* I3C PP DDR */ ++#define PHY_I3C_DDR_CTRL0 0x74 ++#define PHY_I3C_DDR_CTRL0_SCL_H GENMASK(25, 16) ++#define PHY_I3C_DDR_CTRL0_SCL_L GENMASK(9, 0) ++#define PHY_I3C_DDR_CTRL1 0x78 ++#define PHY_I3C_DDR_CTRL1_TBIT_H GENMASK(25, 16) ++#define PHY_I3C_DDR_CTRL1_TBIT_L GENMASK(9, 0) ++#define PHY_I3C_DDR_CTRL2 0x7C ++#define PHY_I3C_DDR_CTRL2_HD_PP GENMASK(25, 16) ++#define PHY_I3C_DDR_CTRL2_TBIT_HD_PP GENMASK(9, 0) ++ ++/* 1MHz */ ++#define PHY_I3C_DDR_DEFAULT_SCL_H_NS 380 ++#define PHY_I3C_DDR_DEFAULT_SCL_L_NS 620 ++#define PHY_I3C_DDR_DEFAULT_TBIT_H_NS 380 ++#define PHY_I3C_DDR_DEFAULT_TBIT_L_NS 620 ++#define PHY_I3C_DDR_DEFAULT_HD_PP_NS 10 ++#define PHY_I3C_DDR_DEFAULT_TBIT_HD_PP_NS 10 ++ ++#define PHY_I3C_SR_P_PREPARE_CTRL 0x80 ++#define PHY_I3C_SR_P_PREPARE_CTRL_HD GENMASK(25, 16) ++#define PHY_I3C_SR_P_PREPARE_CTRL_SCL_L GENMASK(9, 0) ++#define PHY_I3C_SR_P_DEFAULT_HD_NS 16 ++#define PHY_I3C_SR_P_DEFAULT_SCL_L_NS 40 ++ ++#define PHY_PULLUP_EN 0x98 ++#define PHY_PULLUP_EN_SCL GENMASK(14, 12) ++#define PHY_PULLUP_EN_SDA GENMASK(10, 8) ++#define PHY_PULLUP_EN_DDR_SCL GENMASK(6, 4) ++#define PHY_PULLUP_EN_DDR_SDA GENMASK(2, 0) ++ 
++#define PHY_I3C_OD_CTRL4 0xD8 ++// SDA drive high (push-pull) time After tCBP ++#define PHY_I3C_OD_CTRL4_DAP GENMASK(26, 16) ++#define PHY_I3C_OD_DEFAULT_DAP_NS 12 ++ ++static inline unsigned int aspeed_get_avail_tx_entries(struct i3c_hci *hci) ++{ ++ unsigned int queue_ptr, entries; ++ ++ queue_ptr = ast_inhouse_read(ASPEED_I3C_QUEUE_PTR0); ++ if (QUEUE_PTR0_TX_W(queue_ptr) >= QUEUE_PTR0_TX_R(queue_ptr)) ++ entries = 0x20 - (QUEUE_PTR0_TX_W(queue_ptr) - ++ QUEUE_PTR0_TX_R(queue_ptr)); ++ else ++ entries = QUEUE_PTR0_TX_R(queue_ptr) - QUEUE_PTR0_TX_W(queue_ptr); ++ ++ return entries; ++} ++ ++static inline unsigned int aspeed_get_received_rx_entries(struct i3c_hci *hci) ++{ ++ unsigned int queue_ptr, entries; ++ ++ queue_ptr = ast_inhouse_read(ASPEED_I3C_QUEUE_PTR1); ++ if (QUEUE_PTR1_RX_W(queue_ptr) >= QUEUE_PTR1_RX_R(queue_ptr)) ++ entries = QUEUE_PTR1_RX_W(queue_ptr) - QUEUE_PTR1_RX_R(queue_ptr); ++ else ++ entries = 0x20 - (QUEUE_PTR1_RX_R(queue_ptr) - ++ QUEUE_PTR1_RX_W(queue_ptr)); ++ ++ return entries; ++} ++ ++static inline unsigned int aspeed_get_i3c_revision_id(struct i3c_hci *hci) ++{ ++ return FIELD_GET(GENMASK(23, 16), hci->vendor_product_id); ++} ++ ++static inline void aspeed_i3c_ccc_handler(struct i3c_hci *hci, u8 ccc) ++{ ++ u32 reg; ++ u8 dynamic_addr; ++ ++ switch (ccc) { ++ case I3C_CCC_RSTDAA(true): ++ case I3C_CCC_RSTDAA(false): ++ hci->master.this->info.dyn_addr = 0; ++ break; ++ case I3C_CCC_ENTDAA: ++ case I3C_CCC_SETDASA: ++ case I3C_CCC_SETNEWDA: ++ case I3C_CCC_SETAASA: ++ reg = ast_inhouse_read(ASPEED_I3C_STS); ++ if (reg & ASPEED_I3C_STS_SLV_DYNAMIC_ADDRESS_VALID) { ++ dynamic_addr = FIELD_GET(ASPEED_I3C_STS_SLV_DYNAMIC_ADDRESS, reg); ++ hci->master.this->info.dyn_addr = dynamic_addr; ++ } ++ break; ++ } ++} ++ ++#endif +diff --git a/drivers/i3c/mctp/Kconfig b/drivers/i3c/mctp/Kconfig +--- a/drivers/i3c/mctp/Kconfig 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/i3c/mctp/Kconfig 2025-12-23 10:16:19.325062821 +0000 +@@ -0,0 +1,23 @@ ++# SPDX-License-Identifier: GPL-2.0-only ++config I3C_MCTP ++ tristate "I3C Controller MCTP driver" ++ depends on I3C ++help ++ Say yes here to enable the I3C MCTP driver for I3C HW that is ++ configured as an I3C Controller Device on the I3C Bus. ++ ++config I3C_MCTP_HDR_DDR ++ bool "transfer with HDR-DDR mode" ++ depends on I3C_MCTP ++ default n ++ help ++ Say yes here to use the HDR-DDR mode as default to transfer data if ++ the device support it. ++ ++config I3C_TARGET_MCTP ++ tristate "I3C Target MCTP driver" ++ depends on I3C ++ select CRC8 ++help ++ Say yes here to enable the I3C MCTP driver for I3C HW that is ++ configured as an I3C Target Device on the I3C Bus. 
+diff --git a/drivers/i3c/mctp/Makefile b/drivers/i3c/mctp/Makefile +--- a/drivers/i3c/mctp/Makefile 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/i3c/mctp/Makefile 2025-12-23 10:16:19.325062821 +0000 +@@ -0,0 +1,3 @@ ++# SPDX-License-Identifier: GPL-2.0-only ++obj-$(CONFIG_I3C_MCTP) += i3c-mctp.o ++obj-$(CONFIG_I3C_TARGET_MCTP) += i3c-target-mctp.o +diff --git a/drivers/i3c/mctp/i3c-mctp.c b/drivers/i3c/mctp/i3c-mctp.c +--- a/drivers/i3c/mctp/i3c-mctp.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/i3c/mctp/i3c-mctp.c 2025-12-23 10:16:19.325062821 +0000 +@@ -0,0 +1,697 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* Copyright (C) 2022 Intel Corporation.*/ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++ ++#include ++ ++#define I3C_MCTP_MINORS 32 ++#define CCC_DEVICE_STATUS_PENDING_INTR(x) (((x) & GENMASK(3, 0)) >> 0) ++#define POLLING_TIMEOUT_MS 50 ++#define MCTP_INTERRUPT_NUMBER 1 ++#define RX_RING_COUNT 16 ++#define I3C_MCTP_MIN_TRANSFER_SIZE 69 ++#define I3C_MCTP_IBI_PAYLOAD_SIZE 2 ++ ++struct i3c_mctp { ++ struct i3c_device *i3c; ++ struct cdev cdev; ++ struct device *dev; ++ struct delayed_work polling_work; ++ struct platform_device *i3c_peci; ++ int id; ++ /* ++ * Restrict an access to the /dev descriptor to one ++ * user at a time. ++ */ ++ spinlock_t device_file_lock; ++ int device_open; ++ /* Currently only one userspace client is supported */ ++ struct i3c_mctp_client *default_client; ++ struct i3c_mctp_client *peci_client; ++ u16 max_read_len; ++ u16 max_write_len; ++}; ++ ++struct i3c_mctp_client { ++ struct i3c_mctp *priv; ++ struct ptr_ring rx_queue; ++ wait_queue_head_t wait_queue; ++}; ++ ++static struct class *i3c_mctp_class; ++static dev_t i3c_mctp_devt; ++static DEFINE_IDA(i3c_mctp_ida); ++ ++static struct kmem_cache *packet_cache; ++ ++/** ++ * i3c_mctp_packet_alloc() - allocates i3c_mctp_packet ++ * ++ * @flags: the type of memory to allocate ++ * ++ * Allocates i3c_mctp_packet via slab allocation ++ * Return: pointer to the packet, NULL if some error occurred ++ */ ++void *i3c_mctp_packet_alloc(gfp_t flags) ++{ ++ return kmem_cache_alloc(packet_cache, flags); ++} ++EXPORT_SYMBOL_GPL(i3c_mctp_packet_alloc); ++ ++/** ++ * i3c_mctp_packet_free() - frees i3c_mctp_packet ++ * ++ * @packet: pointer to the packet which should be freed ++ * ++ * Frees i3c_mctp_packet previously allocated via slab allocation ++ */ ++void i3c_mctp_packet_free(void *packet) ++{ ++ kmem_cache_free(packet_cache, packet); ++} ++EXPORT_SYMBOL_GPL(i3c_mctp_packet_free); ++ ++static void i3c_mctp_client_free(struct i3c_mctp_client *client) ++{ ++ ptr_ring_cleanup(&client->rx_queue, &i3c_mctp_packet_free); ++ ++ kfree(client); ++} ++ ++static struct i3c_mctp_client *i3c_mctp_client_alloc(struct i3c_mctp *priv) ++{ ++ struct i3c_mctp_client *client; ++ int ret; ++ ++ client = kzalloc(sizeof(*client), GFP_KERNEL); ++ if (!client) ++ goto out; ++ ++ client->priv = priv; ++ ret = ptr_ring_init(&client->rx_queue, RX_RING_COUNT, GFP_KERNEL); ++ if (ret) ++ return ERR_PTR(ret); ++ init_waitqueue_head(&client->wait_queue); ++out: ++ return client; ++} ++ ++static struct i3c_mctp_client *i3c_mctp_find_client(struct i3c_mctp *priv, ++ struct i3c_mctp_packet *packet) ++{ ++ u8 *msg_hdr = (u8 *)packet->data.payload; ++ u8 mctp_type = msg_hdr[MCTP_MSG_HDR_MSG_TYPE_OFFSET]; ++ u16 vendor = (msg_hdr[MCTP_MSG_HDR_VENDOR_OFFSET] << 8 ++ | msg_hdr[MCTP_MSG_HDR_VENDOR_OFFSET + 1]); ++ u8 intel_msg_op_code = 
msg_hdr[MCTP_MSG_HDR_OPCODE_OFFSET]; ++ ++ if (priv->peci_client && mctp_type == MCTP_MSG_TYPE_VDM_PCI && ++ vendor == MCTP_VDM_PCI_INTEL_VENDOR_ID && intel_msg_op_code == MCTP_VDM_PCI_INTEL_PECI) ++ return priv->peci_client; ++ ++ return priv->default_client; ++} ++ ++static struct i3c_mctp_packet *i3c_mctp_read_packet(struct i3c_device *i3c) ++{ ++ struct i3c_mctp *priv = dev_get_drvdata(i3cdev_to_dev(i3c)); ++ struct i3c_mctp_packet *rx_packet; ++ struct i3c_priv_xfer xfers = { ++ .rnw = true, ++ }; ++ int ret; ++ ++ rx_packet = i3c_mctp_packet_alloc(GFP_KERNEL); ++ if (!rx_packet) ++ return ERR_PTR(-ENOMEM); ++ ++ rx_packet->size = I3C_MCTP_PACKET_SIZE; ++ xfers.len = rx_packet->size; ++ xfers.data.in = &rx_packet->data; ++ ++ /* Check against packet size + PEC byte to make sure that we always try to read max */ ++ if (priv->max_read_len != xfers.len + 1) { ++ dev_dbg(i3cdev_to_dev(i3c), "Length mismatch. MRL = %d, xfers.len = %d", ++ priv->max_read_len, xfers.len); ++ i3c_mctp_packet_free(rx_packet); ++ return ERR_PTR(-EINVAL); ++ } ++ if (i3c->desc->info.hdr_cap & BIT(I3C_HDR_DDR) && ++ IS_ENABLED(CONFIG_I3C_MCTP_HDR_DDR)) { ++ struct i3c_hdr_cmd cmds; ++ ++ cmds.mode = I3C_HDR_DDR; ++ cmds.code = 0x80; ++ cmds.ndatawords = DIV_ROUND_UP(rx_packet->size, 2); ++ cmds.data.in = &rx_packet->data; ++ ret = i3c_device_send_hdr_cmds(i3c, &cmds, 1); ++ if (!ret) ++ rx_packet->size = cmds.ndatawords; ++ } else { ++ ret = i3c_device_do_priv_xfers(i3c, &xfers, 1); ++ if (!ret) ++ rx_packet->size = xfers.len; ++ } ++ if (ret) { ++ i3c_mctp_packet_free(rx_packet); ++ return ERR_PTR(ret); ++ } ++ ++ return rx_packet; ++} ++ ++static void i3c_mctp_dispatch_packet(struct i3c_mctp *priv, struct i3c_mctp_packet *packet) ++{ ++ struct i3c_mctp_client *client = i3c_mctp_find_client(priv, packet); ++ int ret; ++ ++ ret = ptr_ring_produce(&client->rx_queue, packet); ++ if (ret) ++ i3c_mctp_packet_free(packet); ++ else ++ wake_up_all(&client->wait_queue); ++} ++ ++static void i3c_mctp_polling_work(struct work_struct *work) ++{ ++ struct i3c_mctp *priv = container_of(to_delayed_work(work), struct i3c_mctp, polling_work); ++ struct i3c_device *i3cdev = priv->i3c; ++ struct i3c_mctp_packet *rx_packet; ++ struct i3c_device_info info; ++ int ret; ++ ++ i3c_device_get_info(i3cdev, &info); ++ ret = i3c_device_getstatus_ccc(i3cdev, &info); ++ if (ret) ++ return; ++ ++ if (CCC_DEVICE_STATUS_PENDING_INTR(info.status) != MCTP_INTERRUPT_NUMBER) ++ return; ++ ++ rx_packet = i3c_mctp_read_packet(i3cdev); ++ if (IS_ERR(rx_packet)) ++ goto out; ++ ++ i3c_mctp_dispatch_packet(priv, rx_packet); ++out: ++ schedule_delayed_work(&priv->polling_work, msecs_to_jiffies(POLLING_TIMEOUT_MS)); ++} ++ ++static ssize_t i3c_mctp_write(struct file *file, const char __user *buf, size_t count, ++ loff_t *f_pos) ++{ ++ struct i3c_mctp *priv = file->private_data; ++ struct i3c_device *i3c = priv->i3c; ++ struct i3c_priv_xfer xfers = { ++ .rnw = false, ++ .len = count, ++ }; ++ u8 *data; ++ int ret; ++ ++ /* ++ * Check against packet size + PEC byte ++ * to not send more data than it was set in the probe ++ */ ++ if (priv->max_write_len < xfers.len + 1) { ++ dev_dbg(i3cdev_to_dev(i3c), "Length mismatch. 
MWL = %d, xfers.len = %d", ++ priv->max_write_len, xfers.len); ++ return -EINVAL; ++ } ++ ++ data = memdup_user(buf, count); ++ if (IS_ERR(data)) ++ return PTR_ERR(data); ++ ++ if (i3c->desc->info.hdr_cap & BIT(I3C_HDR_DDR) && ++ IS_ENABLED(CONFIG_I3C_MCTP_HDR_DDR)) { ++ struct i3c_hdr_cmd cmds; ++ ++ cmds.mode = I3C_HDR_DDR; ++ cmds.code = 0; ++ cmds.ndatawords = DIV_ROUND_UP(count, 2); ++ cmds.data.out = data; ++ ret = i3c_device_send_hdr_cmds(i3c, &cmds, 1); ++ } else { ++ xfers.data.out = data; ++ ++ ret = i3c_device_do_priv_xfers(i3c, &xfers, 1); ++ } ++ kfree(data); ++ return ret ?: count; ++} ++ ++static ssize_t i3c_mctp_read(struct file *file, char __user *buf, size_t count, loff_t *f_pos) ++{ ++ struct i3c_mctp *priv = file->private_data; ++ struct i3c_mctp_client *client = priv->default_client; ++ struct i3c_mctp_packet *rx_packet; ++ ++ if (count > sizeof(rx_packet->data)) ++ count = sizeof(rx_packet->data); ++ ++ rx_packet = ptr_ring_consume(&client->rx_queue); ++ if (!rx_packet) ++ return -EAGAIN; ++ ++ if (count > rx_packet->size) ++ count = rx_packet->size; ++ ++ if (copy_to_user(buf, &rx_packet->data, count)) ++ return -EFAULT; ++ ++ i3c_mctp_packet_free(rx_packet); ++ ++ return count; ++} ++ ++static int i3c_mctp_open(struct inode *inode, struct file *file) ++{ ++ struct i3c_mctp *priv = container_of(inode->i_cdev, struct i3c_mctp, cdev); ++ ++ spin_lock(&priv->device_file_lock); ++ if (priv->device_open) { ++ spin_unlock(&priv->device_file_lock); ++ return -EBUSY; ++ } ++ priv->device_open++; ++ /* Discard all of the packet in the rx_queue */ ++ while (ptr_ring_consume(&priv->default_client->rx_queue)) ++ ; ++ spin_unlock(&priv->device_file_lock); ++ ++ file->private_data = priv; ++ ++ return 0; ++} ++ ++static int i3c_mctp_release(struct inode *inode, struct file *file) ++{ ++ struct i3c_mctp *priv = file->private_data; ++ ++ spin_lock(&priv->device_file_lock); ++ priv->device_open--; ++ spin_unlock(&priv->device_file_lock); ++ ++ file->private_data = NULL; ++ ++ return 0; ++} ++ ++static __poll_t i3c_mctp_poll(struct file *file, struct poll_table_struct *pt) ++{ ++ struct i3c_mctp *priv = file->private_data; ++ __poll_t ret = 0; ++ ++ poll_wait(file, &priv->default_client->wait_queue, pt); ++ ++ if (__ptr_ring_peek(&priv->default_client->rx_queue)) ++ ret |= EPOLLIN; ++ ++ return ret; ++} ++ ++static const struct file_operations i3c_mctp_fops = { ++ .owner = THIS_MODULE, ++ .read = i3c_mctp_read, ++ .write = i3c_mctp_write, ++ .poll = i3c_mctp_poll, ++ .open = i3c_mctp_open, ++ .release = i3c_mctp_release, ++}; ++ ++/** ++ * i3c_mctp_add_peci_client() - registers PECI client ++ * @i3c: I3C device to get the PECI client for ++ * ++ * Return: pointer to PECI client, -ENOMEM - in case of client alloc fault ++ */ ++struct i3c_mctp_client *i3c_mctp_add_peci_client(struct i3c_device *i3c) ++{ ++ struct i3c_mctp *priv = dev_get_drvdata(i3cdev_to_dev(i3c)); ++ struct i3c_mctp_client *client; ++ ++ client = i3c_mctp_client_alloc(priv); ++ if (IS_ERR(client)) ++ return ERR_PTR(-ENOMEM); ++ ++ priv->peci_client = client; ++ ++ return priv->peci_client; ++} ++EXPORT_SYMBOL_GPL(i3c_mctp_add_peci_client); ++ ++/** ++ * i3c_mctp_remove_peci_client() - un-registers PECI client ++ * @client: i3c_mctp_client to be freed ++ */ ++void i3c_mctp_remove_peci_client(struct i3c_mctp_client *client) ++{ ++ struct i3c_mctp *priv = client->priv; ++ ++ i3c_mctp_client_free(priv->peci_client); ++ ++ priv->peci_client = NULL; ++} ++EXPORT_SYMBOL_GPL(i3c_mctp_remove_peci_client); ++ ++static struct 
i3c_mctp *i3c_mctp_alloc(struct i3c_device *i3c) ++{ ++ struct i3c_mctp *priv; ++ int id; ++ ++ priv = devm_kzalloc(i3cdev_to_dev(i3c), sizeof(*priv), GFP_KERNEL); ++ if (!priv) ++ return ERR_PTR(-ENOMEM); ++ ++ id = ida_alloc(&i3c_mctp_ida, GFP_KERNEL); ++ if (id < 0) { ++ pr_err("i3c_mctp: no minor number available!\n"); ++ return ERR_PTR(id); ++ } ++ ++ priv->id = id; ++ priv->i3c = i3c; ++ ++ spin_lock_init(&priv->device_file_lock); ++ ++ return priv; ++} ++ ++static void i3c_mctp_ibi_handler(struct i3c_device *dev, const struct i3c_ibi_payload *payload) ++{ ++ struct i3c_mctp *priv = dev_get_drvdata(i3cdev_to_dev(dev)); ++ struct i3c_mctp_packet *rx_packet; ++ ++ rx_packet = i3c_mctp_read_packet(dev); ++ if (IS_ERR(rx_packet)) ++ return; ++ ++ i3c_mctp_dispatch_packet(priv, rx_packet); ++} ++ ++static int i3c_mctp_init(struct i3c_driver *drv) ++{ ++ int ret; ++ ++ packet_cache = kmem_cache_create_usercopy("mctp-i3c-packet", ++ sizeof(struct i3c_mctp_packet), 0, 0, 0, ++ sizeof(struct i3c_mctp_packet), NULL); ++ if (IS_ERR(packet_cache)) { ++ ret = PTR_ERR(packet_cache); ++ goto out; ++ } ++ ++ /* Dynamically request unused major number */ ++ ret = alloc_chrdev_region(&i3c_mctp_devt, 0, I3C_MCTP_MINORS, "i3c-mctp"); ++ if (ret) ++ goto out; ++ ++ /* Create a class to populate sysfs entries*/ ++ i3c_mctp_class = class_create("i3c-mctp"); ++ if (IS_ERR(i3c_mctp_class)) { ++ ret = PTR_ERR(i3c_mctp_class); ++ goto out_unreg_chrdev; ++ } ++ ++ i3c_driver_register(drv); ++ ++ return 0; ++ ++out_unreg_chrdev: ++ unregister_chrdev_region(i3c_mctp_devt, I3C_MCTP_MINORS); ++out: ++ pr_err("i3c_mctp: driver initialisation failed\n"); ++ return ret; ++} ++ ++static void i3c_mctp_free(struct i3c_driver *drv) ++{ ++ i3c_driver_unregister(drv); ++ class_destroy(i3c_mctp_class); ++ unregister_chrdev_region(i3c_mctp_devt, I3C_MCTP_MINORS); ++ kmem_cache_destroy(packet_cache); ++} ++ ++static int i3c_mctp_enable_ibi(struct i3c_device *i3cdev) ++{ ++ struct i3c_ibi_setup ibireq = { ++ .handler = i3c_mctp_ibi_handler, ++ .max_payload_len = 2, ++ .num_slots = 10, ++ }; ++ int ret; ++ ++ ret = i3c_device_request_ibi(i3cdev, &ibireq); ++ if (ret) ++ return ret; ++ ret = i3c_device_enable_ibi(i3cdev); ++ if (ret) ++ i3c_device_free_ibi(i3cdev); ++ ++ return ret; ++} ++ ++static void i3c_mctp_disable_ibi(struct i3c_device *i3cdev) ++{ ++ i3c_device_disable_ibi(i3cdev); ++ i3c_device_free_ibi(i3cdev); ++} ++ ++/** ++ * i3c_mctp_get_eid() - receive MCTP EID assigned to the device ++ * ++ * @client: client for the device to get the EID for ++ * @domain_id: requested domain ID ++ * @eid: pointer to store EID value ++ * ++ * Receive MCTP endpoint ID dynamically assigned by the MCTP Bus Owner ++ * Return: 0 in case of success, a negative error code otherwise. ++ */ ++int i3c_mctp_get_eid(struct i3c_mctp_client *client, u8 domain_id, u8 *eid) ++{ ++ /* TODO: Implement EID assignment basing on domain ID */ ++ *eid = 1; ++ return 0; ++} ++EXPORT_SYMBOL_GPL(i3c_mctp_get_eid); ++ ++/** ++ * i3c_mctp_send_packet() - send mctp packet ++ * ++ * @tx_packet: the allocated packet that needs to be send via I3C ++ * @i3c: i3c device to send the packet to ++ * ++ * Return: 0 in case of success, a negative error code otherwise. 
++ */ ++int i3c_mctp_send_packet(struct i3c_device *i3c, struct i3c_mctp_packet *tx_packet) ++{ ++ if (i3c->desc->info.hdr_cap & BIT(I3C_HDR_DDR) && ++ IS_ENABLED(CONFIG_I3C_MCTP_HDR_DDR)) { ++ struct i3c_hdr_cmd cmds; ++ ++ cmds.mode = I3C_HDR_DDR; ++ cmds.code = 0; ++ cmds.ndatawords = DIV_ROUND_UP(tx_packet->size, 2); ++ cmds.data.out = &tx_packet->data; ++ return i3c_device_send_hdr_cmds(i3c, &cmds, 1); ++ } ++ struct i3c_priv_xfer xfers; ++ ++ xfers.rnw = false; ++ xfers.len = tx_packet->size; ++ xfers.data.out = &tx_packet->data; ++ return i3c_device_do_priv_xfers(i3c, &xfers, 1); ++} ++EXPORT_SYMBOL_GPL(i3c_mctp_send_packet); ++ ++/** ++ * i3c_mctp_receive_packet() - receive mctp packet ++ * ++ * @client: i3c_mctp_client to receive the packet from ++ * @timeout: timeout, in jiffies ++ * ++ * The function will sleep for up to @timeout if no packet is ready to read. ++ * ++ * Returns struct i3c_mctp_packet from or ERR_PTR in case of error or the ++ * timeout elapsed. ++ */ ++struct i3c_mctp_packet *i3c_mctp_receive_packet(struct i3c_mctp_client *client, ++ unsigned long timeout) ++{ ++ struct i3c_mctp_packet *rx_packet; ++ int ret; ++ ++ ret = wait_event_interruptible_timeout(client->wait_queue, ++ __ptr_ring_peek(&client->rx_queue), timeout); ++ if (ret < 0) ++ return ERR_PTR(ret); ++ else if (ret == 0) ++ return ERR_PTR(-ETIME); ++ ++ rx_packet = ptr_ring_consume(&client->rx_queue); ++ if (!rx_packet) ++ return ERR_PTR(-EAGAIN); ++ ++ return rx_packet; ++} ++EXPORT_SYMBOL_GPL(i3c_mctp_receive_packet); ++ ++static void i3c_mctp_i3c_event_cb(struct i3c_device *dev, enum i3c_event event) ++{ ++ struct i3c_mctp *priv = dev_get_drvdata(i3cdev_to_dev(dev)); ++ ++ switch (event) { ++ case i3c_event_prepare_for_rescan: ++ /* ++ * Disable IBI and polling mode blindly. 
++ */ ++ i3c_mctp_disable_ibi(dev); ++ cancel_delayed_work(&priv->polling_work); ++ break; ++ case i3c_event_rescan_done: ++ if (i3c_mctp_enable_ibi(dev)) { ++ INIT_DELAYED_WORK(&priv->polling_work, ++ i3c_mctp_polling_work); ++ schedule_delayed_work(&priv->polling_work, ++ msecs_to_jiffies(POLLING_TIMEOUT_MS)); ++ } ++ break; ++ default: ++ break; ++ } ++} ++ ++static int i3c_mctp_probe(struct i3c_device *i3cdev) ++{ ++ int ibi_payload_size = I3C_MCTP_IBI_PAYLOAD_SIZE; ++ struct device *dev = i3cdev_to_dev(i3cdev); ++ struct i3c_device_info info; ++ struct i3c_mctp *priv; ++ int ret; ++ ++ priv = i3c_mctp_alloc(i3cdev); ++ if (IS_ERR(priv)) ++ return PTR_ERR(priv); ++ ++ cdev_init(&priv->cdev, &i3c_mctp_fops); ++ ++ priv->cdev.owner = THIS_MODULE; ++ ret = cdev_add(&priv->cdev, MKDEV(MAJOR(i3c_mctp_devt), priv->id), 1); ++ if (ret) ++ goto error_cdev; ++ ++ /* register this i3c device with the driver core */ ++ priv->dev = device_create(i3c_mctp_class, dev, ++ MKDEV(MAJOR(i3c_mctp_devt), priv->id), ++ NULL, "i3c-mctp-%d", priv->id); ++ if (IS_ERR(priv->dev)) { ++ ret = PTR_ERR(priv->dev); ++ goto error; ++ } ++ ++ ret = i3c_device_control_pec(i3cdev, true); ++ if (ret) ++ dev_warn(priv->dev, "Hardware not support pec"); ++ ++ priv->default_client = i3c_mctp_client_alloc(priv); ++ if (IS_ERR(priv->default_client)) ++ goto error; ++ ++ dev_set_drvdata(i3cdev_to_dev(i3cdev), priv); ++ ++ priv->i3c_peci = platform_device_register_data(i3cdev_to_dev(i3cdev), "peci-i3c", priv->id, ++ NULL, 0); ++ if (IS_ERR(priv->i3c_peci)) ++ dev_warn(priv->dev, "failed to register peci-i3c device\n"); ++ ++ i3c_device_register_event_cb(i3cdev, i3c_mctp_i3c_event_cb); ++ if (i3c_mctp_enable_ibi(i3cdev)) { ++ INIT_DELAYED_WORK(&priv->polling_work, i3c_mctp_polling_work); ++ schedule_delayed_work(&priv->polling_work, msecs_to_jiffies(POLLING_TIMEOUT_MS)); ++ ibi_payload_size = 0; ++ } ++ ++ i3c_device_get_info(i3cdev, &info); ++ ++ ret = i3c_device_getmrl_ccc(i3cdev, &info); ++ if (ret || info.max_read_len != I3C_MCTP_MIN_TRANSFER_SIZE) ++ ret = i3c_device_setmrl_ccc(i3cdev, &info, I3C_MCTP_MIN_TRANSFER_SIZE, ++ ibi_payload_size); ++ if (ret && info.max_read_len != I3C_MCTP_MIN_TRANSFER_SIZE) { ++ dev_err(dev, "Failed to set MRL!, ret = %d\n", ret); ++ goto error_peci; ++ } ++ priv->max_read_len = info.max_read_len; ++ ++ ret = i3c_device_getmwl_ccc(i3cdev, &info); ++ if (ret || info.max_write_len != I3C_MCTP_MIN_TRANSFER_SIZE) ++ ret = i3c_device_setmwl_ccc(i3cdev, &info, I3C_MCTP_MIN_TRANSFER_SIZE); ++ if (ret && info.max_write_len != I3C_MCTP_MIN_TRANSFER_SIZE) { ++ dev_err(dev, "Failed to set MWL!, ret = %d\n", ret); ++ goto error_peci; ++ } ++ priv->max_write_len = info.max_write_len; ++ ++ return 0; ++ ++error_peci: ++ platform_device_unregister(priv->i3c_peci); ++ i3c_device_disable_ibi(i3cdev); ++ i3c_device_free_ibi(i3cdev); ++error: ++ cdev_del(&priv->cdev); ++error_cdev: ++ put_device(dev); ++ return ret; ++} ++ ++static void i3c_mctp_remove(struct i3c_device *i3cdev) ++{ ++ struct i3c_mctp *priv = dev_get_drvdata(i3cdev_to_dev(i3cdev)); ++ ++ i3c_mctp_disable_ibi(i3cdev); ++ i3c_mctp_client_free(priv->default_client); ++ priv->default_client = NULL; ++ platform_device_unregister(priv->i3c_peci); ++ ++ device_destroy(i3c_mctp_class, MKDEV(MAJOR(i3c_mctp_devt), priv->id)); ++ cdev_del(&priv->cdev); ++ ida_free(&i3c_mctp_ida, priv->id); ++} ++ ++static const struct i3c_device_id i3c_mctp_ids[] = { ++ I3C_CLASS(0xCC, 0x0), ++ I3C_DEVICE(0x3f6, 0x8000, (void *)0), ++ I3C_DEVICE(0x3f6, 0x8001, (void *)0), 
++ I3C_DEVICE(0x3f6, 0xA001, (void *)0), ++ I3C_DEVICE(0x3f6, 0xA003, (void *)0), ++ I3C_DEVICE(0x3f6, 0x0503, (void *)0), ++ { }, ++}; ++ ++static struct i3c_driver i3c_mctp_drv = { ++ .driver.name = "i3c-mctp", ++ .id_table = i3c_mctp_ids, ++ .probe = i3c_mctp_probe, ++ .remove = i3c_mctp_remove, ++}; ++ ++module_driver(i3c_mctp_drv, i3c_mctp_init, i3c_mctp_free); ++MODULE_AUTHOR("Oleksandr Shulzhenko "); ++MODULE_DESCRIPTION("I3C MCTP driver"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/i3c/mctp/i3c-target-mctp.c b/drivers/i3c/mctp/i3c-target-mctp.c +--- a/drivers/i3c/mctp/i3c-target-mctp.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/i3c/mctp/i3c-target-mctp.c 2025-12-23 10:16:19.325062821 +0000 +@@ -0,0 +1,485 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* Copyright (C) 2022 Intel Corporation.*/ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#define I3C_CRC8_POLYNOMIAL 0x07 ++DECLARE_CRC8_TABLE(i3c_crc8_table); ++ ++#define I3C_TARGET_MCTP_MINORS 32 ++#define RX_RING_COUNT 16 ++ ++/* ++ * IBI Mandatory Data Byte ++ * https://www.mipi.org/mipi_i3c_mandatory_data_byte_values_public ++ * ++ * MCTP: ++ * bit[7:5] = 3'b101 ++ * bit[4:0] = 5'h0E ++ */ ++#define I3C_MCTP_MDB 0xae ++ ++static struct class *i3c_target_mctp_class; ++static dev_t i3c_target_mctp_devt; ++static DEFINE_IDA(i3c_target_mctp_ida); ++ ++struct mctp_client; ++ ++struct i3c_target_mctp { ++ struct i3c_device *i3cdev; ++ struct cdev cdev; ++ int id; ++ struct mctp_client *client; ++ spinlock_t client_lock; /* to protect client access */ ++ bool mdb_append_pec; ++}; ++ ++struct mctp_client { ++ struct kref ref; ++ struct i3c_target_mctp *priv; ++ struct ptr_ring rx_queue; ++ wait_queue_head_t wait_queue; ++}; ++ ++struct mctp_packet { ++ u8 *data; ++ u16 count; ++}; ++ ++static void *i3c_target_mctp_packet_alloc(u16 count) ++{ ++ struct mctp_packet *packet; ++ u8 *data; ++ ++ packet = kzalloc(sizeof(*packet), GFP_ATOMIC); ++ if (!packet) ++ return NULL; ++ ++ data = kzalloc(count, GFP_ATOMIC); ++ if (!data) { ++ kfree(packet); ++ return NULL; ++ } ++ ++ packet->data = data; ++ packet->count = count; ++ ++ return packet; ++} ++ ++static void i3c_target_mctp_packet_free(void *data) ++{ ++ struct mctp_packet *packet = data; ++ ++ kfree(packet->data); ++ kfree(packet); ++} ++ ++static struct mctp_client *i3c_target_mctp_client_alloc(struct i3c_target_mctp *priv) ++{ ++ struct mctp_client *client; ++ ++ client = kzalloc(sizeof(*client), GFP_KERNEL); ++ if (!client) ++ goto out; ++ ++ kref_init(&client->ref); ++ client->priv = priv; ++ ptr_ring_init(&client->rx_queue, RX_RING_COUNT, GFP_KERNEL); ++out: ++ return client; ++} ++ ++static void i3c_target_mctp_client_free(struct kref *ref) ++{ ++ struct mctp_client *client = container_of(ref, typeof(*client), ref); ++ ++ ptr_ring_cleanup(&client->rx_queue, &i3c_target_mctp_packet_free); ++ ++ kfree(client); ++} ++ ++static void i3c_target_mctp_client_get(struct mctp_client *client) ++{ ++ kref_get(&client->ref); ++} ++ ++static void i3c_target_mctp_client_put(struct mctp_client *client) ++{ ++ kref_put(&client->ref, &i3c_target_mctp_client_free); ++} ++ ++static void ++i3c_target_mctp_rx_packet_enqueue(struct i3c_device *i3cdev, const u8 *data, size_t count) ++{ ++ struct i3c_target_mctp *priv = dev_get_drvdata(i3cdev_to_dev(i3cdev)); ++ struct mctp_client *client; ++ struct mctp_packet *packet; ++ int ret; ++ ++ spin_lock(&priv->client_lock); ++ client = priv->client; ++ if (client) ++ i3c_target_mctp_client_get(client); 
++ spin_unlock(&priv->client_lock); ++ ++ if (!client) ++ return; ++ ++ packet = i3c_target_mctp_packet_alloc(count); ++ if (!packet) ++ goto err; ++ ++ memcpy(packet->data, data, count); ++ ++ ret = ptr_ring_produce(&client->rx_queue, packet); ++ if (ret) ++ i3c_target_mctp_packet_free(packet); ++ else ++ wake_up_all(&client->wait_queue); ++err: ++ i3c_target_mctp_client_put(client); ++} ++ ++static struct mctp_client *i3c_target_mctp_create_client(struct i3c_target_mctp *priv) ++{ ++ struct mctp_client *client; ++ int ret; ++ ++ /* Currently, we support just one client. */ ++ spin_lock_irq(&priv->client_lock); ++ ret = priv->client ? -EBUSY : 0; ++ spin_unlock_irq(&priv->client_lock); ++ ++ if (ret) ++ return ERR_PTR(ret); ++ ++ client = i3c_target_mctp_client_alloc(priv); ++ if (!client) ++ return ERR_PTR(-ENOMEM); ++ ++ init_waitqueue_head(&client->wait_queue); ++ ++ spin_lock_irq(&priv->client_lock); ++ priv->client = client; ++ spin_unlock_irq(&priv->client_lock); ++ ++ return client; ++} ++ ++static void i3c_target_mctp_delete_client(struct mctp_client *client) ++{ ++ struct i3c_target_mctp *priv = client->priv; ++ ++ spin_lock_irq(&priv->client_lock); ++ priv->client = NULL; ++ spin_unlock_irq(&priv->client_lock); ++ ++ i3c_target_mctp_client_put(client); ++} ++ ++static int i3c_target_mctp_open(struct inode *inode, struct file *file) ++{ ++ struct i3c_target_mctp *priv = container_of(inode->i_cdev, struct i3c_target_mctp, cdev); ++ struct mctp_client *client; ++ ++ client = i3c_target_mctp_create_client(priv); ++ if (IS_ERR(client)) ++ return PTR_ERR(client); ++ ++ file->private_data = client; ++ ++ return 0; ++} ++ ++static int i3c_target_mctp_release(struct inode *inode, struct file *file) ++{ ++ struct mctp_client *client = file->private_data; ++ ++ i3c_target_mctp_delete_client(client); ++ ++ return 0; ++} ++ ++static ssize_t i3c_target_mctp_read(struct file *file, char __user *buf, ++ size_t count, loff_t *ppos) ++{ ++ struct mctp_client *client = file->private_data; ++ struct mctp_packet *rx_packet; ++ ++ rx_packet = ptr_ring_consume_irq(&client->rx_queue); ++ if (!rx_packet) ++ return -EAGAIN; ++ ++ if (count < rx_packet->count) { ++ count = -EINVAL; ++ goto err_free; ++ } ++ if (count > rx_packet->count) ++ count = rx_packet->count; ++ ++ if (copy_to_user(buf, rx_packet->data, count)) ++ count = -EFAULT; ++err_free: ++ i3c_target_mctp_packet_free(rx_packet); ++ ++ return count; ++} ++ ++static u8 *pec_append(u8 addr_rnw, u8 *buf, u8 len) ++{ ++ u8 pec_v; ++ ++ pec_v = crc8(i3c_crc8_table, &addr_rnw, 1, 0); ++ pec_v = crc8(i3c_crc8_table, buf, len, pec_v); ++ buf[len] = pec_v; ++ ++ return buf; ++} ++ ++static ssize_t i3c_target_mctp_write(struct file *file, const char __user *buf, ++ size_t count, loff_t *ppos) ++{ ++ struct mctp_client *client = file->private_data; ++ struct i3c_target_mctp *priv = client->priv; ++ struct i3c_priv_xfer xfers[2] = {}; ++ struct i3c_device_info info; ++ u8 *tx_data; ++ u8 *ibi_data; ++ int ret; ++ bool ibi_enabled = i3c_device_is_ibi_enabled(priv->i3cdev); ++ ++ if (!ibi_enabled) { ++ dev_warn(i3cdev_to_dev(priv->i3cdev), "IBI not enabled\n"); ++ return count; ++ } ++ if (priv->mdb_append_pec) ++ ibi_data = kzalloc(2, GFP_KERNEL); ++ else ++ ibi_data = kzalloc(1, GFP_KERNEL); ++ if (!ibi_data) ++ return -ENOMEM; ++ ibi_data[0] = I3C_MCTP_MDB; ++ ++ tx_data = kzalloc(count, GFP_KERNEL); ++ if (!tx_data) { ++ ret = -ENOMEM; ++ goto free_ibi; ++ } ++ ++ if (copy_from_user(tx_data, buf, count)) { ++ ret = -EFAULT; ++ goto out_packet; ++ } ++ ++ 
i3c_device_get_info(priv->i3cdev, &info); ++ if (priv->mdb_append_pec) { ++ pec_append(info.dyn_addr << 1 | 0x1, ibi_data, 1); ++ xfers[0].len = 2; ++ } else { ++ xfers[0].len = 1; ++ } ++ xfers[0].data.out = ibi_data; ++ ++ xfers[1].data.out = tx_data; ++ xfers[1].len = count; ++ ++ ret = i3c_device_pending_read_notify(priv->i3cdev, &xfers[1], ++ &xfers[0]); ++ if (ret) ++ goto out_packet; ++ ret = count; ++ ++out_packet: ++ kfree(tx_data); ++free_ibi: ++ kfree(ibi_data); ++ return ret; ++} ++ ++static __poll_t i3c_target_mctp_poll(struct file *file, struct poll_table_struct *pt) ++{ ++ struct mctp_client *client = file->private_data; ++ __poll_t ret = 0; ++ ++ poll_wait(file, &client->wait_queue, pt); ++ ++ if (__ptr_ring_peek(&client->rx_queue)) ++ ret |= EPOLLIN; ++ ++ /* ++ * TODO: Add support for "write" readiness. ++ * DW-I3C has a hardware queue that has finite number of entries. ++ * If we try to issue more writes that space in this queue allows for, ++ * we're in trouble. This should be handled by error from write() and ++ * poll() blocking for write events. ++ */ ++ return ret; ++} ++ ++static const struct file_operations i3c_target_mctp_fops = { ++ .owner = THIS_MODULE, ++ .open = i3c_target_mctp_open, ++ .release = i3c_target_mctp_release, ++ .read = i3c_target_mctp_read, ++ .write = i3c_target_mctp_write, ++ .poll = i3c_target_mctp_poll, ++}; ++ ++static struct i3c_target_read_setup i3c_target_mctp_rx_packet_setup = { ++ .handler = i3c_target_mctp_rx_packet_enqueue, ++}; ++ ++static ssize_t mdb_append_pec_show(struct device *dev, struct device_attribute *attr, char *buf) ++{ ++ struct i3c_device *i3cdev = dev_get_drvdata(dev); ++ struct i3c_target_mctp *priv = i3cdev_get_drvdata(i3cdev); ++ ssize_t ret; ++ ++ ret = sysfs_emit(buf, "%d\n", priv->mdb_append_pec); ++ ++ return ret; ++} ++ ++static ssize_t mdb_append_pec_store(struct device *dev, struct device_attribute *attr, ++ const char *buf, size_t count) ++{ ++ struct i3c_device *i3cdev = dev_get_drvdata(dev); ++ struct i3c_target_mctp *priv = i3cdev_get_drvdata(i3cdev); ++ bool res; ++ int ret; ++ ++ ret = kstrtobool(buf, &res); ++ if (ret) ++ return ret; ++ ++ priv->mdb_append_pec = res; ++ ++ return count; ++} ++ ++static DEVICE_ATTR_RW(mdb_append_pec); ++ ++static int i3c_target_mctp_probe(struct i3c_device *i3cdev) ++{ ++ struct device *parent = i3cdev_to_dev(i3cdev); ++ struct i3c_target_mctp *priv; ++ struct device *dev; ++ int ret; ++ ++ priv = devm_kzalloc(parent, sizeof(*priv), GFP_KERNEL); ++ if (!priv) ++ return -ENOMEM; ++ ++ ret = ida_alloc(&i3c_target_mctp_ida, GFP_KERNEL); ++ if (ret < 0) ++ return ret; ++ priv->id = ret; ++ ++ priv->i3cdev = i3cdev; ++ spin_lock_init(&priv->client_lock); ++ ++ cdev_init(&priv->cdev, &i3c_target_mctp_fops); ++ priv->cdev.owner = THIS_MODULE; ++ ++ ret = cdev_add(&priv->cdev, ++ MKDEV(MAJOR(i3c_target_mctp_devt), priv->id), 1); ++ if (ret) { ++ ida_free(&i3c_target_mctp_ida, priv->id); ++ return ret; ++ } ++ ++ dev = device_create(i3c_target_mctp_class, parent, ++ MKDEV(MAJOR(i3c_target_mctp_devt), priv->id), i3cdev, ++ "i3c-mctp-target-%d", priv->id); ++ if (IS_ERR(dev)) { ++ ret = PTR_ERR(dev); ++ goto err; ++ } ++ ++ /* ++ * By default, the PEC is appended to the MDB as a hardware workaround for the AST2600 I3C ++ * controller as primary controller. 
++ */ ++ priv->mdb_append_pec = 1; ++ ++ ret = device_create_file(dev, &dev_attr_mdb_append_pec); ++ if (unlikely(ret)) { ++ dev_err(dev, "Failed creating device attrs\n"); ++ ret = -EINVAL; ++ goto err; ++ } ++ ++ i3cdev_set_drvdata(i3cdev, priv); ++ ++ i3c_target_read_register(i3cdev, &i3c_target_mctp_rx_packet_setup); ++ ++ crc8_populate_msb(i3c_crc8_table, I3C_CRC8_POLYNOMIAL); ++ ++ return 0; ++err: ++ cdev_del(&priv->cdev); ++ ida_free(&i3c_target_mctp_ida, priv->id); ++ ++ return ret; ++} ++ ++static void i3c_target_mctp_remove(struct i3c_device *i3cdev) ++{ ++ struct i3c_target_mctp *priv = dev_get_drvdata(i3cdev_to_dev(i3cdev)); ++ ++ device_destroy(i3c_target_mctp_class, i3c_target_mctp_devt); ++ cdev_del(&priv->cdev); ++ ida_free(&i3c_target_mctp_ida, priv->id); ++} ++ ++static const struct i3c_device_id i3c_target_mctp_ids[] = { ++ I3C_CLASS(0xcc, 0x0), ++ { }, ++}; ++ ++static struct i3c_driver i3c_target_mctp_drv = { ++ .driver.name = "i3c-target-mctp", ++ .id_table = i3c_target_mctp_ids, ++ .probe = i3c_target_mctp_probe, ++ .remove = i3c_target_mctp_remove, ++ .target = true, ++}; ++ ++static int i3c_target_mctp_init(struct i3c_driver *drv) ++{ ++ int ret; ++ ++ ret = alloc_chrdev_region(&i3c_target_mctp_devt, 0, ++ I3C_TARGET_MCTP_MINORS, "i3c-target-mctp"); ++ if (ret) ++ return ret; ++ ++ i3c_target_mctp_class = class_create("i3c-target-mctp"); ++ if (IS_ERR(i3c_target_mctp_class)) { ++ unregister_chrdev_region(i3c_target_mctp_devt, I3C_TARGET_MCTP_MINORS); ++ return PTR_ERR(i3c_target_mctp_class); ++ } ++ ++ return i3c_driver_register(drv); ++} ++ ++static void i3c_target_mctp_fini(struct i3c_driver *drv) ++{ ++ i3c_driver_unregister(drv); ++ class_destroy(i3c_target_mctp_class); ++ unregister_chrdev_region(i3c_target_mctp_devt, I3C_TARGET_MCTP_MINORS); ++} ++ ++module_driver(i3c_target_mctp_drv, i3c_target_mctp_init, i3c_target_mctp_fini); ++MODULE_AUTHOR("Iwona Winiarska "); ++MODULE_DESCRIPTION("I3C Target MCTP driver"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/iio/adc/aspeed_adc.c b/drivers/iio/adc/aspeed_adc.c +--- a/drivers/iio/adc/aspeed_adc.c 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/iio/adc/aspeed_adc.c 2025-12-23 10:16:20.950035586 +0000 +@@ -72,6 +72,8 @@ + #define ASPEED_ADC_BAT_SENSING_ENABLE BIT(13) + #define ASPEED_ADC_CTRL_CHANNEL GENMASK(31, 16) + #define ASPEED_ADC_CTRL_CHANNEL_ENABLE(ch) FIELD_PREP(ASPEED_ADC_CTRL_CHANNEL, BIT(ch)) ++#define ADC_MASK(n) ((n) < 16 ? 
((1U << (n)) - 1) : 0xFFFF) ++#define ASPEED_ADC_CTRL_CHANNELS_ENABLE(chs) FIELD_PREP(ASPEED_ADC_CTRL_CHANNEL, ADC_MASK(chs)) + + #define ASPEED_ADC_INIT_POLLING_TIME 500 + #define ASPEED_ADC_INIT_TIMEOUT 500000 +@@ -82,6 +84,8 @@ + */ + #define ASPEED_ADC_DEF_SAMPLING_RATE 65000 + ++static DEFINE_IDA(aspeed_adc_ida); ++ + struct aspeed_adc_trim_locate { + const unsigned int offset; + const unsigned int field; +@@ -95,6 +99,7 @@ + bool wait_init_sequence; + bool need_prescaler; + bool bat_sense_sup; ++ bool require_extra_eoc; + u8 scaler_bit_width; + unsigned int num_channels; + const struct aspeed_adc_trim_locate *trim_locate; +@@ -107,6 +112,7 @@ + + struct aspeed_adc_data { + struct device *dev; ++ int id; + const struct aspeed_adc_model_data *model_data; + void __iomem *base; + spinlock_t clk_lock; +@@ -119,6 +125,26 @@ + int cv; + bool battery_sensing; + struct adc_gain battery_mode_gain; ++ unsigned int required_eoc_num; ++ u16 *upper_bound; ++ u16 *lower_bound; ++ bool *upper_en; ++ bool *lower_en; ++}; ++ ++static const struct iio_event_spec aspeed_adc_events[] = { ++ { ++ .type = IIO_EV_TYPE_THRESH, ++ .dir = IIO_EV_DIR_RISING, ++ .mask_separate = ++ BIT(IIO_EV_INFO_VALUE) | BIT(IIO_EV_INFO_ENABLE), ++ }, ++ { ++ .type = IIO_EV_TYPE_THRESH, ++ .dir = IIO_EV_DIR_FALLING, ++ .mask_separate = ++ BIT(IIO_EV_INFO_VALUE) | BIT(IIO_EV_INFO_ENABLE), ++ }, + }; + + #define ASPEED_CHAN(_idx, _data_reg_addr) { \ +@@ -130,6 +156,8 @@ + .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \ + BIT(IIO_CHAN_INFO_SAMP_FREQ) | \ + BIT(IIO_CHAN_INFO_OFFSET), \ ++ .event_spec = aspeed_adc_events, \ ++ .num_event_specs = ARRAY_SIZE(aspeed_adc_events), \ + } + + static const struct iio_chan_spec aspeed_adc_iio_channels[] = { +@@ -174,18 +202,11 @@ + + static int aspeed_adc_set_trim_data(struct iio_dev *indio_dev) + { +- struct device_node *syscon; + struct regmap *scu; + u32 scu_otp, trimming_val; + struct aspeed_adc_data *data = iio_priv(indio_dev); + +- syscon = of_find_node_by_name(NULL, "syscon"); +- if (syscon == NULL) { +- dev_warn(data->dev, "Couldn't find syscon node\n"); +- return -EOPNOTSUPP; +- } +- scu = syscon_node_to_regmap(syscon); +- of_node_put(syscon); ++ scu = syscon_regmap_lookup_by_phandle(data->dev->of_node, "aspeed,scu"); + if (IS_ERR(scu)) { + dev_warn(data->dev, "Failed to get syscon regmap\n"); + return -EOPNOTSUPP; +@@ -276,36 +297,68 @@ + return 0; + } + ++static int aspeed_adc_get_voltage_raw(struct aspeed_adc_data *data, struct iio_chan_spec const *chan) ++{ ++ int val; ++ ++ val = readw(data->base + chan->address); ++ dev_dbg(data->dev, ++ "%d upper_bound: %d %x, lower_bound: %d %x, delay: %d * %d ns", ++ chan->channel, data->upper_en[chan->channel], ++ data->upper_bound[chan->channel], data->lower_en[chan->channel], ++ data->lower_bound[chan->channel], data->sample_period_ns, ++ data->required_eoc_num); ++ if (data->upper_en[chan->channel]) { ++ if (val >= data->upper_bound[chan->channel]) { ++ ndelay(data->sample_period_ns * ++ data->required_eoc_num); ++ val = readw(data->base + chan->address); ++ } ++ } ++ if (data->lower_en[chan->channel]) { ++ if (val <= data->lower_bound[chan->channel]) { ++ ndelay(data->sample_period_ns * ++ data->required_eoc_num); ++ val = readw(data->base + chan->address); ++ } ++ } ++ return val; ++} ++ + static int aspeed_adc_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int *val, int *val2, long mask) + { + struct aspeed_adc_data *data = iio_priv(indio_dev); +- u32 adc_engine_control_reg_val; ++ u32 
engine_ctrl_tmp_val, reg_val; + + switch (mask) { + case IIO_CHAN_INFO_RAW: +- if (data->battery_sensing && chan->channel == 7) { +- adc_engine_control_reg_val = +- readl(data->base + ASPEED_REG_ENGINE_CONTROL); +- writel(adc_engine_control_reg_val | +- FIELD_PREP(ASPEED_ADC_CH7_MODE, +- ASPEED_ADC_CH7_BAT) | +- ASPEED_ADC_BAT_SENSING_ENABLE, +- data->base + ASPEED_REG_ENGINE_CONTROL); ++ if (data->model_data->bat_sense_sup && ++ chan->channel == data->model_data->num_channels - 1) { ++ engine_ctrl_tmp_val = readl(data->base + ASPEED_REG_ENGINE_CONTROL); ++ reg_val = engine_ctrl_tmp_val & ++ ~ASPEED_ADC_CTRL_CHANNELS_ENABLE(data->model_data->num_channels); ++ reg_val |= ASPEED_ADC_CTRL_CHANNEL_ENABLE(chan->channel); ++ if (data->battery_sensing) ++ reg_val |= FIELD_PREP(ASPEED_ADC_CH7_MODE, ASPEED_ADC_CH7_BAT) | ++ ASPEED_ADC_BAT_SENSING_ENABLE; ++ writel(reg_val, data->base + ASPEED_REG_ENGINE_CONTROL); + /* + * After enable battery sensing mode need to wait some time for adc stable + * Experiment result is 1ms. + */ + mdelay(1); +- *val = readw(data->base + chan->address); +- *val = (*val * data->battery_mode_gain.mult) / +- data->battery_mode_gain.div; ++ *val = aspeed_adc_get_voltage_raw(data, chan); ++ if (data->battery_sensing) ++ *val = (*val * data->battery_mode_gain.mult) / ++ data->battery_mode_gain.div; + /* Restore control register value */ +- writel(adc_engine_control_reg_val, ++ writel(engine_ctrl_tmp_val, + data->base + ASPEED_REG_ENGINE_CONTROL); +- } else +- *val = readw(data->base + chan->address); ++ } else { ++ *val = aspeed_adc_get_voltage_raw(data, chan); ++ } + return IIO_VAL_INT; + + case IIO_CHAN_INFO_OFFSET: +@@ -368,9 +421,106 @@ + return 0; + } + ++static int aspeed_adc_read_event_config(struct iio_dev *indio_dev, ++ const struct iio_chan_spec *chan, ++ enum iio_event_type type, ++ enum iio_event_direction dir) ++{ ++ struct aspeed_adc_data *data = iio_priv(indio_dev); ++ ++ switch (dir) { ++ case IIO_EV_DIR_RISING: ++ return data->upper_en[chan->channel]; ++ case IIO_EV_DIR_FALLING: ++ return data->lower_en[chan->channel]; ++ default: ++ return -EINVAL; ++ } ++} ++ ++static int aspeed_adc_write_event_config(struct iio_dev *indio_dev, ++ const struct iio_chan_spec *chan, ++ enum iio_event_type type, ++ enum iio_event_direction dir, ++ int state) ++{ ++ struct aspeed_adc_data *data = iio_priv(indio_dev); ++ ++ switch (dir) { ++ case IIO_EV_DIR_RISING: ++ data->upper_en[chan->channel] = state ? 1 : 0; ++ break; ++ case IIO_EV_DIR_FALLING: ++ data->lower_en[chan->channel] = state ? 
1 : 0; ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static int aspeed_adc_write_event_value(struct iio_dev *indio_dev, ++ const struct iio_chan_spec *chan, ++ enum iio_event_type type, ++ enum iio_event_direction dir, ++ enum iio_event_info info, int val, ++ int val2) ++{ ++ struct aspeed_adc_data *data = iio_priv(indio_dev); ++ ++ if (info != IIO_EV_INFO_VALUE) ++ return -EINVAL; ++ ++ switch (dir) { ++ case IIO_EV_DIR_RISING: ++ if (val >= BIT(ASPEED_RESOLUTION_BITS)) ++ return -EINVAL; ++ data->upper_bound[chan->channel] = val; ++ break; ++ case IIO_EV_DIR_FALLING: ++ data->lower_bound[chan->channel] = val; ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static int aspeed_adc_read_event_value(struct iio_dev *indio_dev, ++ const struct iio_chan_spec *chan, ++ enum iio_event_type type, ++ enum iio_event_direction dir, ++ enum iio_event_info info, int *val, ++ int *val2) ++{ ++ struct aspeed_adc_data *data = iio_priv(indio_dev); ++ ++ if (info != IIO_EV_INFO_VALUE) ++ return -EINVAL; ++ ++ switch (dir) { ++ case IIO_EV_DIR_RISING: ++ *val = data->upper_bound[chan->channel]; ++ break; ++ case IIO_EV_DIR_FALLING: ++ *val = data->lower_bound[chan->channel]; ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ return IIO_VAL_INT; ++} ++ + static const struct iio_info aspeed_adc_iio_info = { + .read_raw = aspeed_adc_read_raw, + .write_raw = aspeed_adc_write_raw, ++ .read_event_config = &aspeed_adc_read_event_config, ++ .write_event_config = &aspeed_adc_write_event_config, ++ .read_event_value = &aspeed_adc_read_event_value, ++ .write_event_value = &aspeed_adc_write_event_value, + .debugfs_reg_access = aspeed_adc_reg_access, + }; + +@@ -381,6 +531,13 @@ + clk_hw_unregister_fixed_factor(clk); + } + ++static void aspeed_adc_ida_remove(void *data) ++{ ++ struct aspeed_adc_data *priv_data = data; ++ ++ ida_simple_remove(&aspeed_adc_ida, priv_data->id); ++} ++ + static void aspeed_adc_reset_assert(void *data) + { + struct reset_control *rst = data; +@@ -415,6 +572,7 @@ + } + adc_engine_control_reg_val = + readl(data->base + ASPEED_REG_ENGINE_CONTROL); ++ adc_engine_control_reg_val &= ~ASPEED_ADC_REF_VOLTAGE; + + ret = devm_regulator_get_enable_read_voltage(data->dev, "vref"); + if (ret < 0 && ret != -ENODEV) +@@ -474,6 +632,7 @@ + u32 adc_engine_control_reg_val; + unsigned long scaler_flags = 0; + char clk_name[32], clk_parent_name[32]; ++ const char *model_name; + + indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*data)); + if (!indio_dev) +@@ -488,12 +647,44 @@ + if (IS_ERR(data->base)) + return PTR_ERR(data->base); + ++ data->upper_bound = devm_kzalloc(&pdev->dev, ++ sizeof(data->upper_bound) * ++ data->model_data->num_channels, ++ GFP_KERNEL); ++ if (!data->upper_bound) ++ return -ENOMEM; ++ data->upper_en = devm_kzalloc(&pdev->dev, ++ sizeof(data->upper_en) * ++ data->model_data->num_channels, ++ GFP_KERNEL); ++ if (!data->upper_en) ++ return -ENOMEM; ++ data->lower_bound = devm_kzalloc(&pdev->dev, ++ sizeof(data->lower_bound) * ++ data->model_data->num_channels, ++ GFP_KERNEL); ++ if (!data->lower_bound) ++ return -ENOMEM; ++ data->lower_en = devm_kzalloc(&pdev->dev, ++ sizeof(data->lower_en) * ++ data->model_data->num_channels, ++ GFP_KERNEL); ++ if (!data->lower_en) ++ return -ENOMEM; ++ data->id = ida_simple_get(&aspeed_adc_ida, 0, 0, GFP_KERNEL); ++ if (data->id < 0) ++ return data->id; ++ ret = devm_add_action_or_reset(data->dev, aspeed_adc_ida_remove, data); ++ if (ret) ++ return ret; ++ model_name = kasprintf(GFP_KERNEL, "%s-%d", ++ 
data->model_data->model_name, data->id); + /* Register ADC clock prescaler with source specified by device tree. */ + spin_lock_init(&data->clk_lock); + snprintf(clk_parent_name, ARRAY_SIZE(clk_parent_name), "%s", + of_clk_get_parent_name(pdev->dev.of_node, 0)); + snprintf(clk_name, ARRAY_SIZE(clk_name), "%s-fixed-div", +- data->model_data->model_name); ++ model_name); + data->fixed_div_clk = clk_hw_register_fixed_factor( + &pdev->dev, clk_name, clk_parent_name, 0, 1, 2); + if (IS_ERR(data->fixed_div_clk)) +@@ -508,7 +699,7 @@ + + if (data->model_data->need_prescaler) { + snprintf(clk_name, ARRAY_SIZE(clk_name), "%s-prescaler", +- data->model_data->model_name); ++ model_name); + data->clk_prescaler = devm_clk_hw_register_divider( + &pdev->dev, clk_name, clk_parent_name, 0, + data->base + ASPEED_REG_CLOCK_CONTROL, 17, 15, 0, +@@ -524,7 +715,7 @@ + * setting to adjust the prescaler as well. + */ + snprintf(clk_name, ARRAY_SIZE(clk_name), "%s-scaler", +- data->model_data->model_name); ++ model_name); + data->clk_scaler = devm_clk_hw_register_divider( + &pdev->dev, clk_name, clk_parent_name, scaler_flags, + data->base + ASPEED_REG_CLOCK_CONTROL, 0, +@@ -612,13 +803,25 @@ + + aspeed_adc_compensation(indio_dev); + /* Start all channels in normal mode. */ +- adc_engine_control_reg_val = +- readl(data->base + ASPEED_REG_ENGINE_CONTROL); +- adc_engine_control_reg_val |= ASPEED_ADC_CTRL_CHANNEL; ++ adc_engine_control_reg_val = readl(data->base + ASPEED_REG_ENGINE_CONTROL); ++ /* Disable the last channel when the controller supports battery sensing */ ++ if (data->model_data->bat_sense_sup) ++ adc_engine_control_reg_val |= ++ ASPEED_ADC_CTRL_CHANNELS_ENABLE(data->model_data->num_channels - 1); ++ else ++ adc_engine_control_reg_val |= ++ ASPEED_ADC_CTRL_CHANNELS_ENABLE(data->model_data->num_channels); + writel(adc_engine_control_reg_val, + data->base + ASPEED_REG_ENGINE_CONTROL); +- +- indio_dev->name = data->model_data->model_name; ++ adc_engine_control_reg_val = ++ FIELD_GET(ASPEED_ADC_CTRL_CHANNEL, ++ readl(data->base + ASPEED_REG_ENGINE_CONTROL)); ++ data->required_eoc_num = hweight_long(adc_engine_control_reg_val); ++ if (data->model_data->require_extra_eoc && ++ (adc_engine_control_reg_val & ++ BIT(data->model_data->num_channels - 1))) ++ data->required_eoc_num += 12; ++ indio_dev->name = model_name; + indio_dev->info = &aspeed_adc_iio_info; + indio_dev->modes = INDIO_DIRECT_MODE; + indio_dev->channels = data->battery_sensing ? 
+@@ -645,6 +848,16 @@ + .field = GENMASK(7, 4), + }; + ++static const struct aspeed_adc_trim_locate ast2700_adc0_trim = { ++ .offset = 0x820, ++ .field = GENMASK(3, 0), ++}; ++ ++static const struct aspeed_adc_trim_locate ast2700_adc1_trim = { ++ .offset = 0x820, ++ .field = GENMASK(7, 4), ++}; ++ + static const struct aspeed_adc_model_data ast2400_model_data = { + .model_name = "ast2400-adc", + .vref_fixed_mv = 2500, +@@ -653,6 +866,7 @@ + .need_prescaler = true, + .scaler_bit_width = 10, + .num_channels = 16, ++ .require_extra_eoc = 0, + }; + + static const struct aspeed_adc_model_data ast2500_model_data = { +@@ -665,6 +879,7 @@ + .scaler_bit_width = 10, + .num_channels = 16, + .trim_locate = &ast2500_adc_trim, ++ .require_extra_eoc = 0, + }; + + static const struct aspeed_adc_model_data ast2600_adc0_model_data = { +@@ -676,6 +891,7 @@ + .scaler_bit_width = 16, + .num_channels = 8, + .trim_locate = &ast2600_adc0_trim, ++ .require_extra_eoc = 1, + }; + + static const struct aspeed_adc_model_data ast2600_adc1_model_data = { +@@ -687,6 +903,29 @@ + .scaler_bit_width = 16, + .num_channels = 8, + .trim_locate = &ast2600_adc1_trim, ++ .require_extra_eoc = 1, ++}; ++ ++static const struct aspeed_adc_model_data ast2700_adc0_model_data = { ++ .model_name = "ast2700-adc0", ++ .min_sampling_rate = 10000, ++ .max_sampling_rate = 500000, ++ .wait_init_sequence = true, ++ .bat_sense_sup = true, ++ .scaler_bit_width = 16, ++ .num_channels = 8, ++ .trim_locate = &ast2700_adc0_trim, ++}; ++ ++static const struct aspeed_adc_model_data ast2700_adc1_model_data = { ++ .model_name = "ast2700-adc1", ++ .min_sampling_rate = 10000, ++ .max_sampling_rate = 500000, ++ .wait_init_sequence = true, ++ .bat_sense_sup = true, ++ .scaler_bit_width = 16, ++ .num_channels = 8, ++ .trim_locate = &ast2700_adc1_trim, + }; + + static const struct of_device_id aspeed_adc_matches[] = { +@@ -694,6 +933,8 @@ + { .compatible = "aspeed,ast2500-adc", .data = &ast2500_model_data }, + { .compatible = "aspeed,ast2600-adc0", .data = &ast2600_adc0_model_data }, + { .compatible = "aspeed,ast2600-adc1", .data = &ast2600_adc1_model_data }, ++ { .compatible = "aspeed,ast2700-adc0", .data = &ast2700_adc0_model_data }, ++ { .compatible = "aspeed,ast2700-adc1", .data = &ast2700_adc1_model_data }, + { } + }; + MODULE_DEVICE_TABLE(of, aspeed_adc_matches); +diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile +--- a/drivers/irqchip/Makefile 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/irqchip/Makefile 2025-12-23 10:16:13.379162479 +0000 +@@ -83,8 +83,7 @@ + obj-$(CONFIG_MVEBU_SEI) += irq-mvebu-sei.o + obj-$(CONFIG_LS_EXTIRQ) += irq-ls-extirq.o + obj-$(CONFIG_LS_SCFG_MSI) += irq-ls-scfg-msi.o +-obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o irq-aspeed-i2c-ic.o irq-aspeed-scu-ic.o +-obj-$(CONFIG_STM32MP_EXTI) += irq-stm32mp-exti.o ++obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o irq-aspeed-i2c-ic.o irq-aspeed-scu-ic.o irq-aspeed-intc.o irq-aspeed-e2m-ic.o + obj-$(CONFIG_STM32_EXTI) += irq-stm32-exti.o + obj-$(CONFIG_QCOM_IRQ_COMBINER) += qcom-irq-combiner.o + obj-$(CONFIG_IRQ_UNIPHIER_AIDET) += irq-uniphier-aidet.o +diff --git a/drivers/irqchip/irq-aspeed-e2m-ic.c b/drivers/irqchip/irq-aspeed-e2m-ic.c +--- a/drivers/irqchip/irq-aspeed-e2m-ic.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/irqchip/irq-aspeed-e2m-ic.c 2025-12-23 10:16:21.166031966 +0000 +@@ -0,0 +1,178 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++/* ++ * Aspeed AST27XX E2M Interrupt Controller ++ * Copyright (C) 2023 ASPEED Technology Inc. 
++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define ASPEED_AST2700_E2M_IC_SHIFT 0 ++#define ASPEED_AST2700_E2M_IC_ENABLE \ ++ GENMASK(7, ASPEED_AST2700_E2M_IC_SHIFT) ++#define ASPEED_AST2700_E2M_IC_NUM_IRQS 8 ++#define ASPEED_AST2700_E2M_IC_EN_REG 0x14 ++#define ASPEED_AST2700_E2M_IC_STS_REG 0x18 ++ ++struct aspeed_e2m_ic { ++ unsigned long irq_enable; ++ unsigned long irq_shift; ++ unsigned int num_irqs; ++ unsigned int reg; ++ unsigned int en_reg; ++ unsigned int sts_reg; ++ struct regmap *e2m; ++ struct irq_domain *irq_domain; ++}; ++ ++static void aspeed_e2m_ic_irq_handler(struct irq_desc *desc) ++{ ++ unsigned int val; ++ unsigned long bit; ++ unsigned long enabled; ++ unsigned long max; ++ unsigned long status; ++ struct aspeed_e2m_ic *e2m_ic = irq_desc_get_handler_data(desc); ++ struct irq_chip *chip = irq_desc_get_chip(desc); ++ unsigned int mask; ++ ++ chained_irq_enter(chip, desc); ++ ++ mask = e2m_ic->irq_enable; ++ regmap_read(e2m_ic->e2m, e2m_ic->en_reg, &val); ++ enabled = val & e2m_ic->irq_enable; ++ regmap_read(e2m_ic->e2m, e2m_ic->sts_reg, &val); ++ status = val & enabled; ++ ++ bit = e2m_ic->irq_shift; ++ max = e2m_ic->num_irqs + bit; ++ ++ for_each_set_bit_from(bit, &status, max) { ++ generic_handle_domain_irq(e2m_ic->irq_domain, bit - e2m_ic->irq_shift); ++ ++ regmap_write_bits(e2m_ic->e2m, e2m_ic->sts_reg, mask, BIT(bit)); ++ } ++ ++ chained_irq_exit(chip, desc); ++} ++ ++static void aspeed_e2m_ic_irq_mask(struct irq_data *data) ++{ ++ struct aspeed_e2m_ic *e2m_ic = irq_data_get_irq_chip_data(data); ++ unsigned int mask; ++ ++ mask = BIT(data->hwirq + e2m_ic->irq_shift); ++ regmap_update_bits(e2m_ic->e2m, e2m_ic->en_reg, mask, 0); ++} ++ ++static void aspeed_e2m_ic_irq_unmask(struct irq_data *data) ++{ ++ struct aspeed_e2m_ic *e2m_ic = irq_data_get_irq_chip_data(data); ++ unsigned int bit = BIT(data->hwirq + e2m_ic->irq_shift); ++ unsigned int mask; ++ ++ mask = bit; ++ regmap_update_bits(e2m_ic->e2m, e2m_ic->en_reg, mask, bit); ++} ++ ++static int aspeed_e2m_ic_irq_set_affinity(struct irq_data *data, ++ const struct cpumask *dest, ++ bool force) ++{ ++ return -EINVAL; ++} ++ ++static struct irq_chip aspeed_scu_ic_chip = { ++ .name = "aspeed-e2m-ic", ++ .irq_mask = aspeed_e2m_ic_irq_mask, ++ .irq_unmask = aspeed_e2m_ic_irq_unmask, ++ .irq_set_affinity = aspeed_e2m_ic_irq_set_affinity, ++}; ++ ++static int aspeed_e2m_ic_map(struct irq_domain *domain, unsigned int irq, ++ irq_hw_number_t hwirq) ++{ ++ irq_set_chip_and_handler(irq, &aspeed_scu_ic_chip, handle_level_irq); ++ irq_set_chip_data(irq, domain->host_data); ++ ++ return 0; ++} ++ ++static const struct irq_domain_ops aspeed_e2m_ic_domain_ops = { ++ .map = aspeed_e2m_ic_map, ++}; ++ ++static int aspeed_e2m_ic_of_init_common(struct aspeed_e2m_ic *e2m_ic, ++ struct device_node *node) ++{ ++ int irq; ++ int rc = 0; ++ ++ if (!node->parent) { ++ rc = -ENODEV; ++ goto err; ++ } ++ ++ e2m_ic->e2m = syscon_node_to_regmap(node->parent); ++ if (IS_ERR(e2m_ic->e2m)) { ++ rc = PTR_ERR(e2m_ic->e2m); ++ goto err; ++ } ++ ++ /* Clear status and disable all interrupt */ ++ regmap_write_bits(e2m_ic->e2m, e2m_ic->sts_reg, ++ e2m_ic->irq_enable, e2m_ic->irq_enable); ++ regmap_write_bits(e2m_ic->e2m, e2m_ic->en_reg, ++ e2m_ic->irq_enable, 0); ++ ++ irq = irq_of_parse_and_map(node, 0); ++ if (!irq) { ++ rc = -EINVAL; ++ goto err; ++ } ++ ++ e2m_ic->irq_domain = irq_domain_add_linear(node, e2m_ic->num_irqs, ++ &aspeed_e2m_ic_domain_ops, ++ e2m_ic); ++ if 
(!e2m_ic->irq_domain) { ++ rc = -ENOMEM; ++ goto err; ++ } ++ ++ irq_set_chained_handler_and_data(irq, aspeed_e2m_ic_irq_handler, ++ e2m_ic); ++ ++ return 0; ++ ++err: ++ kfree(e2m_ic); ++ ++ return rc; ++} ++ ++static int __init aspeed_ast2700_e2m_ic_of_init(struct device_node *node, ++ struct device_node *parent) ++{ ++ struct aspeed_e2m_ic *e2m_ic = kzalloc(sizeof(*e2m_ic), GFP_KERNEL); ++ ++ if (!e2m_ic) ++ return -ENOMEM; ++ ++ e2m_ic->irq_enable = ASPEED_AST2700_E2M_IC_ENABLE; ++ e2m_ic->irq_shift = ASPEED_AST2700_E2M_IC_SHIFT; ++ e2m_ic->num_irqs = ASPEED_AST2700_E2M_IC_NUM_IRQS; ++ e2m_ic->en_reg = ASPEED_AST2700_E2M_IC_EN_REG; ++ e2m_ic->sts_reg = ASPEED_AST2700_E2M_IC_STS_REG; ++ ++ return aspeed_e2m_ic_of_init_common(e2m_ic, node); ++} ++ ++IRQCHIP_DECLARE(ast2700_e2m_ic, "aspeed,ast2700-e2m-ic", ++ aspeed_ast2700_e2m_ic_of_init); +diff --git a/drivers/irqchip/irq-aspeed-intc.c b/drivers/irqchip/irq-aspeed-intc.c +--- a/drivers/irqchip/irq-aspeed-intc.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/irqchip/irq-aspeed-intc.c 2025-12-23 10:16:21.173031848 +0000 +@@ -0,0 +1,188 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Aspeed Interrupt Controller. ++ * ++ * Copyright (C) 2023 ASPEED Technology Inc. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define INTC_INT_ENABLE_REG 0x00 ++#define INTC_INT_STATUS_REG 0x04 ++#define INTC_IRQS_PER_WORD 32 ++#define INTC_IRQ_BASE 192 ++ ++struct aspeed_intc_ic { ++ void __iomem *base; ++ raw_spinlock_t intc_lock; ++ struct irq_domain *irq_domain; ++}; ++ ++static void aspeed_intc0_ic_irq_handler(struct irq_desc *desc) ++{ ++ struct aspeed_intc_ic *intc_ic = irq_desc_get_handler_data(desc); ++ struct irq_chip *chip = irq_desc_get_chip(desc); ++ struct irq_data *irq_data = irq_desc_get_irq_data(desc); ++ unsigned long hwirq; ++ ++ if (!irq_data || !intc_ic) { ++ pr_err("Invalid irq_data or intc_ic\n"); ++ return; ++ } ++ ++ if (irq_data->hwirq < INTC_IRQ_BASE + 32) { ++ pr_err("Invalid hwirq: %lu\n", irq_data->hwirq); ++ return; ++ } ++ hwirq = irq_data->hwirq - INTC_IRQ_BASE - 32; /* 32 is SPI offset */ ++ ++ chained_irq_enter(chip, desc); ++ ++ generic_handle_domain_irq(intc_ic->irq_domain, hwirq); ++ ++ /* ++ * TODO: This a WA to prevnet potential race conditions when ++ * multiple interrupts are processed in multi-core environment. 
++ */ ++ raw_spin_lock(&intc_ic->intc_lock); ++ writel(BIT(hwirq), intc_ic->base + INTC_INT_STATUS_REG); ++ raw_spin_unlock(&intc_ic->intc_lock); ++ ++ chained_irq_exit(chip, desc); ++} ++ ++static void aspeed_intc1_ic_irq_handler(struct irq_desc *desc) ++{ ++ struct aspeed_intc_ic *intc_ic = irq_desc_get_handler_data(desc); ++ struct irq_chip *chip = irq_desc_get_chip(desc); ++ unsigned long bit, status; ++ ++ if (!intc_ic) { ++ pr_err("Invalid intc_ic\n"); ++ return; ++ } ++ ++ chained_irq_enter(chip, desc); ++ ++ status = readl(intc_ic->base + INTC_INT_STATUS_REG); ++ ++ for_each_set_bit(bit, &status, INTC_IRQS_PER_WORD) { ++ generic_handle_domain_irq(intc_ic->irq_domain, bit); ++ writel(BIT(bit), intc_ic->base + INTC_INT_STATUS_REG); ++ } ++ ++ chained_irq_exit(chip, desc); ++} ++ ++static void aspeed_intc_irq_mask(struct irq_data *data) ++{ ++ struct aspeed_intc_ic *intc_ic = irq_data_get_irq_chip_data(data); ++ unsigned int mask; ++ ++ guard(raw_spinlock)(&intc_ic->intc_lock); ++ mask = readl(intc_ic->base + INTC_INT_ENABLE_REG) & ~BIT(data->hwirq); ++ writel(mask, intc_ic->base + INTC_INT_ENABLE_REG); ++} ++ ++static void aspeed_intc_irq_unmask(struct irq_data *data) ++{ ++ struct aspeed_intc_ic *intc_ic = irq_data_get_irq_chip_data(data); ++ unsigned int unmask; ++ ++ guard(raw_spinlock)(&intc_ic->intc_lock); ++ unmask = readl(intc_ic->base + INTC_INT_ENABLE_REG) | BIT(data->hwirq); ++ writel(unmask, intc_ic->base + INTC_INT_ENABLE_REG); ++} ++ ++static struct irq_chip aspeed_intc_chip = { ++ .name = "ASPEED INTC", ++ .irq_mask = aspeed_intc_irq_mask, ++ .irq_unmask = aspeed_intc_irq_unmask, ++}; ++ ++static int aspeed_intc_ic_map_irq_domain(struct irq_domain *domain, unsigned int irq, ++ irq_hw_number_t hwirq) ++{ ++ irq_set_chip_and_handler(irq, &aspeed_intc_chip, handle_level_irq); ++ irq_set_chip_data(irq, domain->host_data); ++ ++ return 0; ++} ++ ++static const struct irq_domain_ops aspeed_intc_ic_irq_domain_ops = { ++ .map = aspeed_intc_ic_map_irq_domain, ++}; ++ ++static int __init aspeed_intc_ic_of_init(struct device_node *node, ++ struct device_node *parent) ++{ ++ struct aspeed_intc_ic *intc_ic; ++ int ret = 0; ++ int irq, irq_count, i; ++ ++ intc_ic = kzalloc(sizeof(*intc_ic), GFP_KERNEL); ++ if (!intc_ic) ++ return -ENOMEM; ++ ++ intc_ic->base = of_iomap(node, 0); ++ if (!intc_ic->base) { ++ pr_err("Failed to iomap intc_ic base\n"); ++ ret = -ENOMEM; ++ goto err_free_ic; ++ } ++ writel(0xffffffff, intc_ic->base + INTC_INT_STATUS_REG); ++ writel(0x0, intc_ic->base + INTC_INT_ENABLE_REG); ++ ++ intc_ic->irq_domain = irq_domain_add_linear(node, INTC_IRQS_PER_WORD, ++ &aspeed_intc_ic_irq_domain_ops, intc_ic); ++ if (!intc_ic->irq_domain) { ++ ret = -ENOMEM; ++ goto err_iounmap; ++ } ++ ++ raw_spin_lock_init(&intc_ic->intc_lock); ++ ++ irq_count = of_irq_count(node); ++ if (irq_count == 0) { ++ pr_err("Failed to get irq count\n"); ++ ret = -EINVAL; ++ goto err_iounmap; ++ } ++ ++ for (i = 0; i < irq_count; i++) { ++ irq = irq_of_parse_and_map(node, i); ++ if (!irq) { ++ pr_err("Failed to get irq number\n"); ++ ret = -EINVAL; ++ goto err_iounmap; ++ } else { ++ if (irq_count > 1) ++ irq_set_chained_handler_and_data(irq, aspeed_intc0_ic_irq_handler, intc_ic); ++ else ++ irq_set_chained_handler_and_data(irq, aspeed_intc1_ic_irq_handler, intc_ic); ++ } ++ } ++ ++ return 0; ++ ++err_iounmap: ++ for (i = 0; i < irq_count; i++) { ++ irq = irq_of_parse_and_map(node, i); ++ if (irq) ++ irq_dispose_mapping(irq); ++ } ++ iounmap(intc_ic->base); ++err_free_ic: ++ kfree(intc_ic); ++ 
return ret; ++} ++ ++IRQCHIP_DECLARE(ast2700_intc_ic, "aspeed,ast2700-intc-ic", aspeed_intc_ic_of_init); +diff --git a/drivers/irqchip/irq-aspeed-scu-ic.c b/drivers/irqchip/irq-aspeed-scu-ic.c +--- a/drivers/irqchip/irq-aspeed-scu-ic.c 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/irqchip/irq-aspeed-scu-ic.c 2025-12-23 10:16:21.177031781 +0000 +@@ -1,61 +1,76 @@ + // SPDX-License-Identifier: GPL-2.0-or-later + /* +- * Aspeed AST24XX, AST25XX, and AST26XX SCU Interrupt Controller ++ * Aspeed AST24XX, AST25XX, AST26XX, and AST27XX SCU Interrupt Controller + * Copyright 2019 IBM Corporation + * + * Eddie James + */ + + #include ++#include + #include + #include + #include + #include +-#include ++#include + #include +-#include + +-#define ASPEED_SCU_IC_REG 0x018 +-#define ASPEED_SCU_IC_SHIFT 0 +-#define ASPEED_SCU_IC_ENABLE GENMASK(15, ASPEED_SCU_IC_SHIFT) +-#define ASPEED_SCU_IC_NUM_IRQS 7 + #define ASPEED_SCU_IC_STATUS GENMASK(28, 16) + #define ASPEED_SCU_IC_STATUS_SHIFT 16 ++#define AST2700_SCU_IC_STATUS GENMASK(15, 0) + +-#define ASPEED_AST2600_SCU_IC0_REG 0x560 +-#define ASPEED_AST2600_SCU_IC0_SHIFT 0 +-#define ASPEED_AST2600_SCU_IC0_ENABLE \ +- GENMASK(5, ASPEED_AST2600_SCU_IC0_SHIFT) +-#define ASPEED_AST2600_SCU_IC0_NUM_IRQS 6 +- +-#define ASPEED_AST2600_SCU_IC1_REG 0x570 +-#define ASPEED_AST2600_SCU_IC1_SHIFT 4 +-#define ASPEED_AST2600_SCU_IC1_ENABLE \ +- GENMASK(5, ASPEED_AST2600_SCU_IC1_SHIFT) +-#define ASPEED_AST2600_SCU_IC1_NUM_IRQS 2 ++struct aspeed_scu_ic_variant { ++ const char *compatible; ++ unsigned long irq_enable; ++ unsigned long irq_shift; ++ unsigned int num_irqs; ++ bool split_ier_isr; ++ unsigned long ier; ++ unsigned long isr; ++}; ++ ++#define SCU_VARIANT(_compat, _shift, _enable, _num, _split, _ier, _isr) { \ ++ .compatible = _compat, \ ++ .irq_shift = _shift, \ ++ .irq_enable = _enable, \ ++ .num_irqs = _num, \ ++ .split_ier_isr = _split, \ ++ .ier = _ier, \ ++ .isr = _isr, \ ++} ++ ++static const struct aspeed_scu_ic_variant scu_ic_variants[] __initconst = { ++ SCU_VARIANT("aspeed,ast2400-scu-ic", 0, GENMASK(15, 0), 7, false, 0, 0), ++ SCU_VARIANT("aspeed,ast2500-scu-ic", 0, GENMASK(15, 0), 7, false, 0, 0), ++ SCU_VARIANT("aspeed,ast2600-scu-ic0", 0, GENMASK(5, 0), 6, false, 0, 0), ++ SCU_VARIANT("aspeed,ast2600-scu-ic1", 4, GENMASK(5, 4), 2, false, 0, 0), ++ SCU_VARIANT("aspeed,ast2700-scu-ic0", 0, GENMASK(3, 0), 4, true, 0x00, 0x04), ++ SCU_VARIANT("aspeed,ast2700-scu-ic1", 0, GENMASK(3, 0), 4, true, 0x00, 0x04), ++ SCU_VARIANT("aspeed,ast2700-scu-ic2", 0, GENMASK(3, 0), 4, true, 0x04, 0x00), ++ SCU_VARIANT("aspeed,ast2700-scu-ic3", 0, GENMASK(1, 0), 2, true, 0x04, 0x00), ++}; + + struct aspeed_scu_ic { +- unsigned long irq_enable; +- unsigned long irq_shift; +- unsigned int num_irqs; +- unsigned int reg; +- struct regmap *scu; +- struct irq_domain *irq_domain; ++ unsigned long irq_enable; ++ unsigned long irq_shift; ++ unsigned int num_irqs; ++ void __iomem *base; ++ struct irq_domain *irq_domain; ++ bool split_ier_isr; ++ unsigned long ier; ++ unsigned long isr; + }; + +-static void aspeed_scu_ic_irq_handler(struct irq_desc *desc) +-{ +- unsigned int sts; +- unsigned long bit; +- unsigned long enabled; +- unsigned long max; +- unsigned long status; ++static void aspeed_scu_ic_irq_handler_combined(struct irq_desc *desc) ++{ + struct aspeed_scu_ic *scu_ic = irq_desc_get_handler_data(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); +- unsigned int mask = scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT; ++ unsigned long bit, enabled, max, status; ++ 
unsigned int sts, mask; + + chained_irq_enter(chip, desc); + ++ mask = scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT; + /* + * The SCU IC has just one register to control its operation and read + * status. The interrupt enable bits occupy the lower 16 bits of the +@@ -66,7 +81,7 @@ + * shifting the status down to get the mapping and then back up to + * clear the bit. + */ +- regmap_read(scu_ic->scu, scu_ic->reg, &sts); ++ sts = readl(scu_ic->base); + enabled = sts & scu_ic->irq_enable; + status = (sts >> ASPEED_SCU_IC_STATUS_SHIFT) & enabled; + +@@ -76,15 +91,41 @@ + for_each_set_bit_from(bit, &status, max) { + generic_handle_domain_irq(scu_ic->irq_domain, + bit - scu_ic->irq_shift); ++ writel((readl(scu_ic->base) & ~mask) | ++ BIT(bit + ASPEED_SCU_IC_STATUS_SHIFT), ++ scu_ic->base); ++ } ++ ++ chained_irq_exit(chip, desc); ++} ++ ++static void aspeed_scu_ic_irq_handler_split(struct irq_desc *desc) ++{ ++ struct aspeed_scu_ic *scu_ic = irq_desc_get_handler_data(desc); ++ struct irq_chip *chip = irq_desc_get_chip(desc); ++ unsigned long bit, enabled, max, status; ++ unsigned int sts, mask; ++ ++ chained_irq_enter(chip, desc); + +- regmap_write_bits(scu_ic->scu, scu_ic->reg, mask, +- BIT(bit + ASPEED_SCU_IC_STATUS_SHIFT)); ++ mask = scu_ic->irq_enable; ++ sts = readl(scu_ic->base + scu_ic->isr); ++ enabled = sts & scu_ic->irq_enable; ++ sts = readl(scu_ic->base + scu_ic->isr); ++ status = sts & enabled; ++ ++ bit = scu_ic->irq_shift; ++ max = scu_ic->num_irqs + bit; ++ ++ for_each_set_bit_from(bit, &status, max) { ++ generic_handle_domain_irq(scu_ic->irq_domain, bit - scu_ic->irq_shift); ++ writel(BIT(bit), scu_ic->base + scu_ic->isr); // clear interrupt + } + + chained_irq_exit(chip, desc); + } + +-static void aspeed_scu_ic_irq_mask(struct irq_data *data) ++static void aspeed_scu_ic_irq_mask_combined(struct irq_data *data) + { + struct aspeed_scu_ic *scu_ic = irq_data_get_irq_chip_data(data); + unsigned int mask = BIT(data->hwirq + scu_ic->irq_shift) | +@@ -95,10 +136,10 @@ + * operation from clearing the status bits, they should be under the + * mask and written with 0. + */ +- regmap_update_bits(scu_ic->scu, scu_ic->reg, mask, 0); ++ writel(readl(scu_ic->base) & ~mask, scu_ic->base); + } + +-static void aspeed_scu_ic_irq_unmask(struct irq_data *data) ++static void aspeed_scu_ic_irq_unmask_combined(struct irq_data *data) + { + struct aspeed_scu_ic *scu_ic = irq_data_get_irq_chip_data(data); + unsigned int bit = BIT(data->hwirq + scu_ic->irq_shift); +@@ -110,7 +151,23 @@ + * operation from clearing the status bits, they should be under the + * mask and written with 0. 
+ */ +- regmap_update_bits(scu_ic->scu, scu_ic->reg, mask, bit); ++ writel((readl(scu_ic->base) & ~mask) | bit, scu_ic->base); ++} ++ ++static void aspeed_scu_ic_irq_mask_split(struct irq_data *data) ++{ ++ struct aspeed_scu_ic *scu_ic = irq_data_get_irq_chip_data(data); ++ ++ writel(readl(scu_ic->base) & ~BIT(data->hwirq + scu_ic->irq_shift), ++ scu_ic->base + scu_ic->ier); ++} ++ ++static void aspeed_scu_ic_irq_unmask_split(struct irq_data *data) ++{ ++ struct aspeed_scu_ic *scu_ic = irq_data_get_irq_chip_data(data); ++ unsigned int bit = BIT(data->hwirq + scu_ic->irq_shift); ++ ++ writel(readl(scu_ic->base) | bit, scu_ic->base + scu_ic->ier); + } + + static int aspeed_scu_ic_irq_set_affinity(struct irq_data *data, +@@ -120,17 +177,29 @@ + return -EINVAL; + } + +-static struct irq_chip aspeed_scu_ic_chip = { +- .name = "aspeed-scu-ic", +- .irq_mask = aspeed_scu_ic_irq_mask, +- .irq_unmask = aspeed_scu_ic_irq_unmask, +- .irq_set_affinity = aspeed_scu_ic_irq_set_affinity, ++static struct irq_chip aspeed_scu_ic_chip_combined = { ++ .name = "aspeed-scu-ic", ++ .irq_mask = aspeed_scu_ic_irq_mask_combined, ++ .irq_unmask = aspeed_scu_ic_irq_unmask_combined, ++ .irq_set_affinity = aspeed_scu_ic_irq_set_affinity, ++}; ++ ++static struct irq_chip aspeed_scu_ic_chip_split = { ++ .name = "ast2700-scu-ic", ++ .irq_mask = aspeed_scu_ic_irq_mask_split, ++ .irq_unmask = aspeed_scu_ic_irq_unmask_split, ++ .irq_set_affinity = aspeed_scu_ic_irq_set_affinity, + }; + + static int aspeed_scu_ic_map(struct irq_domain *domain, unsigned int irq, + irq_hw_number_t hwirq) + { +- irq_set_chip_and_handler(irq, &aspeed_scu_ic_chip, handle_level_irq); ++ struct aspeed_scu_ic *scu_ic = domain->host_data; ++ ++ if (scu_ic->split_ier_isr) ++ irq_set_chip_and_handler(irq, &aspeed_scu_ic_chip_split, handle_level_irq); ++ else ++ irq_set_chip_and_handler(irq, &aspeed_scu_ic_chip_combined, handle_level_irq); + irq_set_chip_data(irq, domain->host_data); + + return 0; +@@ -146,18 +215,19 @@ + int irq; + int rc = 0; + +- if (!node->parent) { +- rc = -ENODEV; ++ scu_ic->base = of_iomap(node, 0); ++ if (IS_ERR(scu_ic->base)) { ++ rc = PTR_ERR(scu_ic->base); + goto err; + } + +- scu_ic->scu = syscon_node_to_regmap(node->parent); +- if (IS_ERR(scu_ic->scu)) { +- rc = PTR_ERR(scu_ic->scu); +- goto err; ++ if (scu_ic->split_ier_isr) { ++ writel(AST2700_SCU_IC_STATUS, scu_ic->base + scu_ic->isr); ++ writel(0, scu_ic->base + scu_ic->ier); ++ } else { ++ writel(ASPEED_SCU_IC_STATUS, scu_ic->base); ++ writel(0, scu_ic->base); + } +- regmap_write_bits(scu_ic->scu, scu_ic->reg, ASPEED_SCU_IC_STATUS, ASPEED_SCU_IC_STATUS); +- regmap_write_bits(scu_ic->scu, scu_ic->reg, ASPEED_SCU_IC_ENABLE, 0); + + irq = irq_of_parse_and_map(node, 0); + if (!irq) { +@@ -166,14 +236,15 @@ + } + + scu_ic->irq_domain = irq_domain_add_linear(node, scu_ic->num_irqs, +- &aspeed_scu_ic_domain_ops, +- scu_ic); ++ &aspeed_scu_ic_domain_ops, scu_ic); + if (!scu_ic->irq_domain) { + rc = -ENOMEM; + goto err; + } + +- irq_set_chained_handler_and_data(irq, aspeed_scu_ic_irq_handler, ++ irq_set_chained_handler_and_data(irq, scu_ic->split_ier_isr ? 
++ aspeed_scu_ic_irq_handler_split : ++ aspeed_scu_ic_irq_handler_combined, + scu_ic); + + return 0; +@@ -184,57 +255,45 @@ + return rc; + } + +-static int __init aspeed_scu_ic_of_init(struct device_node *node, +- struct device_node *parent) ++static const struct aspeed_scu_ic_variant * ++aspeed_scu_ic_find_variant(struct device_node *np) + { +- struct aspeed_scu_ic *scu_ic = kzalloc(sizeof(*scu_ic), GFP_KERNEL); +- +- if (!scu_ic) +- return -ENOMEM; +- +- scu_ic->irq_enable = ASPEED_SCU_IC_ENABLE; +- scu_ic->irq_shift = ASPEED_SCU_IC_SHIFT; +- scu_ic->num_irqs = ASPEED_SCU_IC_NUM_IRQS; +- scu_ic->reg = ASPEED_SCU_IC_REG; ++ for (int i = 0; i < ARRAY_SIZE(scu_ic_variants); i++) { ++ if (of_device_is_compatible(np, scu_ic_variants[i].compatible)) ++ return &scu_ic_variants[i]; ++ } + +- return aspeed_scu_ic_of_init_common(scu_ic, node); ++ return NULL; + } + +-static int __init aspeed_ast2600_scu_ic0_of_init(struct device_node *node, +- struct device_node *parent) ++static int __init aspeed_scu_ic_of_init(struct device_node *node, struct device_node *parent) + { +- struct aspeed_scu_ic *scu_ic = kzalloc(sizeof(*scu_ic), GFP_KERNEL); +- +- if (!scu_ic) +- return -ENOMEM; +- +- scu_ic->irq_enable = ASPEED_AST2600_SCU_IC0_ENABLE; +- scu_ic->irq_shift = ASPEED_AST2600_SCU_IC0_SHIFT; +- scu_ic->num_irqs = ASPEED_AST2600_SCU_IC0_NUM_IRQS; +- scu_ic->reg = ASPEED_AST2600_SCU_IC0_REG; +- +- return aspeed_scu_ic_of_init_common(scu_ic, node); +-} ++ const struct aspeed_scu_ic_variant *variant; ++ struct aspeed_scu_ic *scu_ic; + +-static int __init aspeed_ast2600_scu_ic1_of_init(struct device_node *node, +- struct device_node *parent) +-{ +- struct aspeed_scu_ic *scu_ic = kzalloc(sizeof(*scu_ic), GFP_KERNEL); ++ variant = aspeed_scu_ic_find_variant(node); ++ if (!variant) ++ return -ENODEV; + ++ scu_ic = kzalloc(sizeof(*scu_ic), GFP_KERNEL); + if (!scu_ic) + return -ENOMEM; + +- scu_ic->irq_enable = ASPEED_AST2600_SCU_IC1_ENABLE; +- scu_ic->irq_shift = ASPEED_AST2600_SCU_IC1_SHIFT; +- scu_ic->num_irqs = ASPEED_AST2600_SCU_IC1_NUM_IRQS; +- scu_ic->reg = ASPEED_AST2600_SCU_IC1_REG; ++ scu_ic->irq_enable = variant->irq_enable; ++ scu_ic->irq_shift = variant->irq_shift; ++ scu_ic->num_irqs = variant->num_irqs; ++ scu_ic->split_ier_isr = variant->split_ier_isr; ++ scu_ic->ier = variant->ier; ++ scu_ic->isr = variant->isr; + + return aspeed_scu_ic_of_init_common(scu_ic, node); + } + + IRQCHIP_DECLARE(ast2400_scu_ic, "aspeed,ast2400-scu-ic", aspeed_scu_ic_of_init); + IRQCHIP_DECLARE(ast2500_scu_ic, "aspeed,ast2500-scu-ic", aspeed_scu_ic_of_init); +-IRQCHIP_DECLARE(ast2600_scu_ic0, "aspeed,ast2600-scu-ic0", +- aspeed_ast2600_scu_ic0_of_init); +-IRQCHIP_DECLARE(ast2600_scu_ic1, "aspeed,ast2600-scu-ic1", +- aspeed_ast2600_scu_ic1_of_init); ++IRQCHIP_DECLARE(ast2600_scu_ic0, "aspeed,ast2600-scu-ic0", aspeed_scu_ic_of_init); ++IRQCHIP_DECLARE(ast2600_scu_ic1, "aspeed,ast2600-scu-ic1", aspeed_scu_ic_of_init); ++IRQCHIP_DECLARE(ast2700_scu_ic0, "aspeed,ast2700-scu-ic0", aspeed_scu_ic_of_init); ++IRQCHIP_DECLARE(ast2700_scu_ic1, "aspeed,ast2700-scu-ic1", aspeed_scu_ic_of_init); ++IRQCHIP_DECLARE(ast2700_scu_ic2, "aspeed,ast2700-scu-ic2", aspeed_scu_ic_of_init); ++IRQCHIP_DECLARE(ast2700_scu_ic3, "aspeed,ast2700-scu-ic3", aspeed_scu_ic_of_init); +diff --git a/drivers/jtag/Kconfig b/drivers/jtag/Kconfig +--- a/drivers/jtag/Kconfig 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/jtag/Kconfig 2025-12-23 10:16:17.210098268 +0000 +@@ -0,0 +1,31 @@ ++menuconfig JTAG ++ tristate "JTAG support" ++ help ++ This provides 
basic core functionality support for JTAG class devices. ++ Hardware that is equipped with a JTAG microcontroller can be ++ supported by using this driver's interfaces. ++ This driver exposes a set of IOCTLs to the user space for ++ the following commands: ++ SDR: Performs an IEEE 1149.1 Data Register scan ++ SIR: Performs an IEEE 1149.1 Instruction Register scan. ++ RUNTEST: Forces the IEEE 1149.1 bus to a run state for a specified ++ number of clocks or a specified time period. ++ ++ If you want this support, you should say Y here. ++ ++ To compile this driver as a module, choose M here: the module will ++ be called jtag. ++ ++menuconfig JTAG_ASPEED ++ tristate "Aspeed SoC JTAG controller support" ++ depends on JTAG && HAS_IOMEM ++ depends on ARCH_ASPEED || COMPILE_TEST ++ help ++ This provides a support for Aspeed JTAG device, equipped on ++ Aspeed SoC 24xx and 25xx families. Drivers allows programming ++ of hardware devices, connected to SoC through the JTAG interface. ++ ++ If you want this support, you should say Y here. ++ ++ To compile this driver as a module, choose M here: the module will ++ be called jtag-aspeed. +diff --git a/drivers/jtag/Makefile b/drivers/jtag/Makefile +--- a/drivers/jtag/Makefile 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/jtag/Makefile 2025-12-23 10:16:17.210098268 +0000 +@@ -0,0 +1,2 @@ ++obj-$(CONFIG_JTAG) += jtag.o ++obj-$(CONFIG_JTAG_ASPEED) += jtag-aspeed.o +diff --git a/drivers/jtag/jtag-aspeed.c b/drivers/jtag/jtag-aspeed.c +--- a/drivers/jtag/jtag-aspeed.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/jtag/jtag-aspeed.c 2025-12-23 10:16:20.997034798 +0000 +@@ -0,0 +1,1657 @@ ++// SPDX-License-Identifier: GPL-2.0 ++// Copyright (c) 2018 Mellanox Technologies. All rights reserved. ++// Copyright (c) 2018 Oleksandr Shamray ++// Copyright (c) 2019 Intel Corporation ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define ASPEED_JTAG_DATA 0x00 ++#define ASPEED_JTAG_INST 0x04 ++#define ASPEED_JTAG_CTRL 0x08 ++#define ASPEED_JTAG_ISR 0x0C ++#define ASPEED_JTAG_SW 0x10 ++#define ASPEED_JTAG_TCK 0x14 ++#define ASPEED_JTAG_EC 0x18 ++ ++#define ASPEED_JTAG_DATA_MSB 0x01 ++#define ASPEED_JTAG_DATA_CHUNK_SIZE 0x20 ++#define ASPEED_JTAG_HW2_DATA_CHUNK_SIZE 512 ++ ++/* ASPEED_JTAG_CTRL: Engine Control 24xx and 25xx series*/ ++#define ASPEED_JTAG_CTL_ENG_EN BIT(31) ++#define ASPEED_JTAG_CTL_ENG_OUT_EN BIT(30) ++#define ASPEED_JTAG_CTL_FORCE_TMS BIT(29) ++#define ASPEED_JTAG_CTL_IR_UPDATE BIT(26) ++#define ASPEED_JTAG_CTL_INST_LEN(x) ((x) << 20) ++#define ASPEED_JTAG_CTL_LASPEED_INST BIT(17) ++#define ASPEED_JTAG_CTL_INST_EN BIT(16) ++#define ASPEED_JTAG_CTL_DR_UPDATE BIT(10) ++#define ASPEED_JTAG_CTL_DATA_LEN(x) ((x) << 4) ++#define ASPEED_JTAG_CTL_LASPEED_DATA BIT(1) ++#define ASPEED_JTAG_CTL_DATA_EN BIT(0) ++ ++/* ASPEED_JTAG_CTRL: Engine Control 26xx series*/ ++#define ASPEED_JTAG_CTL_26XX_RESET_FIFO BIT(21) ++#define ASPEED_JTAG_CTL_26XX_FIFO_MODE_CTRL BIT(20) ++#define ASPEED_JTAG_CTL_26XX_TRANS_LEN(x) ((x) << 8) ++#define ASPEED_JTAG_CTL_26XX_TRANS_MASK GENMASK(17, 8) ++#define ASPEED_JTAG_CTL_26XX_MSB_FIRST BIT(6) ++#define ASPEED_JTAG_CTL_26XX_TERM_TRANS BIT(5) ++#define ASPEED_JTAG_CTL_26XX_LASPEED_TRANS BIT(4) ++#define ASPEED_JTAG_CTL_26XX_INST_EN BIT(1) ++ ++/* ASPEED_JTAG_ISR : Interrupt status and enable */ ++#define ASPEED_JTAG_ISR_INST_PAUSE BIT(19) ++#define ASPEED_JTAG_ISR_INST_COMPLETE BIT(18) ++#define ASPEED_JTAG_ISR_DATA_PAUSE 
BIT(17) ++#define ASPEED_JTAG_ISR_DATA_COMPLETE BIT(16) ++#define ASPEED_JTAG_ISR_INST_PAUSE_EN BIT(3) ++#define ASPEED_JTAG_ISR_INST_COMPLETE_EN BIT(2) ++#define ASPEED_JTAG_ISR_DATA_PAUSE_EN BIT(1) ++#define ASPEED_JTAG_ISR_DATA_COMPLETE_EN BIT(0) ++#define ASPEED_JTAG_ISR_INT_EN_MASK GENMASK(3, 0) ++#define ASPEED_JTAG_ISR_INT_MASK GENMASK(19, 16) ++ ++/* ASPEED_JTAG_SW : Software Mode and Status */ ++#define ASPEED_JTAG_SW_MODE_EN BIT(19) ++#define ASPEED_JTAG_SW_MODE_TCK BIT(18) ++#define ASPEED_JTAG_SW_MODE_TMS BIT(17) ++#define ASPEED_JTAG_SW_MODE_TDIO BIT(16) ++ ++/* ASPEED_JTAG_TCK : TCK Control */ ++#define ASPEED_JTAG_TCK_DIVISOR_MASK GENMASK(11, 0) ++#define ASPEED_JTAG_TCK_GET_DIV(x) ((x) & ASPEED_JTAG_TCK_DIVISOR_MASK) ++ ++/* ASPEED_JTAG_EC : Controller set for go to IDLE */ ++#define ASPEED_JTAG_EC_TRSTn_HIGH BIT(31) ++#define ASPEED_JTAG_EC_GO_IDLE BIT(0) ++ ++#define ASPEED_JTAG_IOUT_LEN(len) \ ++ (ASPEED_JTAG_CTL_ENG_EN | \ ++ ASPEED_JTAG_CTL_ENG_OUT_EN | \ ++ ASPEED_JTAG_CTL_INST_LEN(len)) ++ ++#define ASPEED_JTAG_DOUT_LEN(len) \ ++ (ASPEED_JTAG_CTL_ENG_EN | \ ++ ASPEED_JTAG_CTL_ENG_OUT_EN | \ ++ ASPEED_JTAG_CTL_DATA_LEN(len)) ++ ++#define ASPEED_JTAG_TRANS_LEN(len) \ ++ (ASPEED_JTAG_CTL_ENG_EN | \ ++ ASPEED_JTAG_CTL_ENG_OUT_EN | \ ++ ASPEED_JTAG_CTL_26XX_TRANS_LEN(len)) ++ ++#define ASPEED_JTAG_SW_TDIO (ASPEED_JTAG_SW_MODE_EN | ASPEED_JTAG_SW_MODE_TDIO) ++ ++#define ASPEED_JTAG_GET_TDI(direction, byte) \ ++ (((direction) & JTAG_WRITE_XFER) ? byte : UINT_MAX) ++ ++#define ASPEED_JTAG_TCK_WAIT 10 ++#define ASPEED_JTAG_RESET_CNTR 10 ++#define WAIT_ITERATIONS 300 ++ ++/* Use this macro to switch between HW mode 1(comment out) and 2(defined) */ ++#define ASPEED_JTAG_HW_MODE_2_ENABLE 1 ++ ++/* ASPEED JTAG HW MODE 2 (Only supported in AST26xx series) */ ++#define ASPEED_JTAG_SHDATA 0x20 ++#define ASPEED_JTAG_SHINST 0x24 ++#define ASPEED_JTAG_PADCTRL0 0x28 ++#define ASPEED_JTAG_PADCTRL1 0x2C ++#define ASPEED_JTAG_SHCTRL 0x30 ++#define ASPEED_JTAG_GBLCTRL 0x34 ++#define ASPEED_JTAG_INTCTRL 0x38 ++#define ASPEED_JTAG_STAT 0x3C ++ ++/* ASPEED_JTAG_PADCTRLx : Padding control 0 and 1 */ ++#define ASPEED_JTAG_PADCTRL_PAD_DATA BIT(24) ++#define ASPEED_JTAG_PADCTRL_POSTPAD(x) (((x) & GENMASK(8, 0)) << 12) ++#define ASPEED_JTAG_PADCTRL_PREPAD(x) (((x) & GENMASK(8, 0)) << 0) ++ ++/* ASPEED_JTAG_SHCTRL: Shift Control */ ++#define ASPEED_JTAG_SHCTRL_FRUN_TCK_EN BIT(31) ++#define ASPEED_JTAG_SHCTRL_STSHIFT_EN BIT(30) ++#define ASPEED_JTAG_SHCTRL_TMS(x) (((x) & GENMASK(13, 0)) << 16) ++#define ASPEED_JTAG_SHCTRL_POST_TMS(x) (((x) & GENMASK(2, 0)) << 13) ++#define ASPEED_JTAG_SHCTRL_PRE_TMS(x) (((x) & GENMASK(2, 0)) << 10) ++#define ASPEED_JTAG_SHCTRL_PAD_SEL0 (0) ++#define ASPEED_JTAG_SHCTRL_PAD_SEL1 BIT(9) ++#define ASPEED_JTAG_SHCTRL_END_SHIFT BIT(8) ++#define ASPEED_JTAG_SHCTRL_START_SHIFT BIT(7) ++#define ASPEED_JTAG_SHCTRL_LWRDT_SHIFT(x) ((x) & GENMASK(6, 0)) ++ ++#define ASPEED_JTAG_END_SHIFT_DISABLED 0 ++ ++/* ASPEED_JTAG_GBLCTRL : Global Control */ ++#define ASPEED_JTAG_GBLCTRL_ENG_MODE_EN BIT(31) ++#define ASPEED_JTAG_GBLCTRL_ENG_OUT_EN BIT(30) ++#define ASPEED_JTAG_GBLCTRL_FORCE_TMS BIT(29) ++#define ASPEED_JTAG_GBLCTRL_SHIFT_COMPLETE BIT(28) ++#define ASPEED_JTAG_GBLCTRL_RESET_FIFO BIT(25) ++#define ASPEED_JTAG_GBLCTRL_FIFO_CTRL_MODE BIT(24) ++#define ASPEED_JTAG_GBLCTRL_UPDT_SHIFT(x) (((x) & GENMASK(9, 7)) << 13) ++#define ASPEED_JTAG_GBLCTRL_STSHIFT(x) (((x) & GENMASK(0, 0)) << 16) ++#define ASPEED_JTAG_GBLCTRL_TRST BIT(15) ++#define ASPEED_JTAG_CLK_DIVISOR_MASK GENMASK(11, 0) 
++#define ASPEED_JTAG_CLK_GET_DIV(x)	((x) & ASPEED_JTAG_CLK_DIVISOR_MASK)
++
++/* ASPEED_JTAG_INTCTRL: Interrupt Control */
++#define ASPEED_JTAG_INTCTRL_SHCPL_IRQ_EN	BIT(16)
++#define ASPEED_JTAG_INTCTRL_SHCPL_IRQ_STAT	BIT(0)
++
++/* ASPEED_JTAG_STAT: JTAG HW mode 2 status */
++#define ASPEED_JTAG_STAT_ENG_IDLE	BIT(0)
++
++#define ASPEED_JTAG_MAX_PAD_SIZE	512
++
++/* Microsecond delay used to work around the intensive R/W FIFO usage issue */
++#define AST26XX_FIFO_UDELAY	2
++
++/* Microsecond delay to allow the JTAG Master Controller to be programmed */
++#define AST26XX_JTAG_CTRL_UDELAY	2
++
++#define DEBUG_JTAG
++
++static const char * const regnames[] = {
++	[ASPEED_JTAG_DATA] = "ASPEED_JTAG_DATA",
++	[ASPEED_JTAG_INST] = "ASPEED_JTAG_INST",
++	[ASPEED_JTAG_CTRL] = "ASPEED_JTAG_CTRL",
++	[ASPEED_JTAG_ISR] = "ASPEED_JTAG_ISR",
++	[ASPEED_JTAG_SW] = "ASPEED_JTAG_SW",
++	[ASPEED_JTAG_TCK] = "ASPEED_JTAG_TCK",
++	[ASPEED_JTAG_EC] = "ASPEED_JTAG_EC",
++	[ASPEED_JTAG_SHDATA] = "ASPEED_JTAG_SHDATA",
++	[ASPEED_JTAG_SHINST] = "ASPEED_JTAG_SHINST",
++	[ASPEED_JTAG_PADCTRL0] = "ASPEED_JTAG_PADCTRL0",
++	[ASPEED_JTAG_PADCTRL1] = "ASPEED_JTAG_PADCTRL1",
++	[ASPEED_JTAG_SHCTRL] = "ASPEED_JTAG_SHCTRL",
++	[ASPEED_JTAG_GBLCTRL] = "ASPEED_JTAG_GBLCTRL",
++	[ASPEED_JTAG_INTCTRL] = "ASPEED_JTAG_INTCTRL",
++	[ASPEED_JTAG_STAT] = "ASPEED_JTAG_STAT",
++};
++
++#define ASPEED_JTAG_NAME	"jtag-aspeed"
++
++struct aspeed_jtag {
++	void __iomem			*reg_base;
++	struct device			*dev;
++	struct clk			*pclk;
++	enum jtag_tapstate		status;
++	int				irq;
++	struct reset_control		*rst;
++	u32				flag;
++	wait_queue_head_t		jtag_wq;
++	u32				mode;
++	enum jtag_tapstate		current_state;
++	u32				tck_period;
++	const struct jtag_low_level_functions *llops;
++	u32				pad_data_one[ASPEED_JTAG_MAX_PAD_SIZE / 32];
++	u32				pad_data_zero[ASPEED_JTAG_MAX_PAD_SIZE / 32];
++};
++
++/*
++ * Multi-generation support is enabled by fops and low-level aspeed function
++ * mapping, using the aspeed_jtag_functions struct as the config mechanism.
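++ * Each supported SoC generation selects its jtag_ops and low-level ops via
++ * the aspeed_jtag_functions entry attached to its aspeed_jtag_of_match
++ * compatible string, which the probe path resolves through of_match_node().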
++ */ ++ ++struct jtag_low_level_functions { ++ void (*output_disable)(struct aspeed_jtag *aspeed_jtag); ++ void (*master_enable)(struct aspeed_jtag *aspeed_jtag); ++ int (*xfer_push_data)(struct aspeed_jtag *aspeed_jtag, ++ enum jtag_xfer_type type, u32 bits_len); ++ int (*xfer_push_data_last)(struct aspeed_jtag *aspeed_jtag, ++ enum jtag_xfer_type type, u32 bits_len); ++ void (*xfer_sw)(struct aspeed_jtag *aspeed_jtag, struct jtag_xfer *xfer, ++ u32 *data); ++ int (*xfer_hw)(struct aspeed_jtag *aspeed_jtag, struct jtag_xfer *xfer, ++ u32 *data); ++ int (*trst_set)(struct aspeed_jtag *aspeed_jtag, u32 active); ++ void (*xfer_hw_fifo_delay)(void); ++ void (*xfer_sw_delay)(struct aspeed_jtag *aspeed_jtag); ++ irqreturn_t (*jtag_interrupt)(s32 this_irq, void *dev_id); ++}; ++ ++struct aspeed_jtag_functions { ++ const struct jtag_ops *aspeed_jtag_ops; ++ const struct jtag_low_level_functions *aspeed_jtag_llops; ++}; ++ ++#ifdef DEBUG_JTAG ++static char *end_status_str[] = { "tlr", "idle", "selDR", "capDR", "sDR", ++ "ex1DR", "pDR", "ex2DR", "updDR", "selIR", ++ "capIR", "sIR", "ex1IR", "pIR", "ex2IR", ++ "updIR", "current" }; ++#endif ++ ++static u32 aspeed_jtag_read(struct aspeed_jtag *aspeed_jtag, u32 reg) ++{ ++ u32 val = readl(aspeed_jtag->reg_base + reg); ++ ++#ifdef DEBUG_JTAG ++ dev_dbg(aspeed_jtag->dev, "read:%s val = 0x%08x\n", regnames[reg], val); ++#endif ++ return val; ++} ++ ++static void aspeed_jtag_write(struct aspeed_jtag *aspeed_jtag, u32 val, u32 reg) ++{ ++#ifdef DEBUG_JTAG ++ dev_dbg(aspeed_jtag->dev, "write:%s val = 0x%08x\n", regnames[reg], ++ val); ++#endif ++ writel(val, aspeed_jtag->reg_base + reg); ++} ++ ++static int aspeed_jtag_freq_set(struct jtag *jtag, u32 freq) ++{ ++ struct aspeed_jtag *aspeed_jtag = jtag_priv(jtag); ++ unsigned long apb_frq; ++ u32 tck_val; ++ u16 div; ++ ++ if (!freq) ++ return -EINVAL; ++ ++ apb_frq = clk_get_rate(aspeed_jtag->pclk); ++ if (!apb_frq) ++ return -EOPNOTSUPP; ++ ++ div = (apb_frq - 1) / freq; ++ if (div > ASPEED_JTAG_TCK_DIVISOR_MASK) ++ div = ASPEED_JTAG_TCK_DIVISOR_MASK; ++ tck_val = aspeed_jtag_read(aspeed_jtag, ASPEED_JTAG_TCK); ++ aspeed_jtag_write(aspeed_jtag, ++ (tck_val & ~ASPEED_JTAG_TCK_DIVISOR_MASK) | div, ++ ASPEED_JTAG_TCK); ++ aspeed_jtag->tck_period = ++ DIV_ROUND_UP_ULL((u64)NSEC_PER_SEC * (div + 1), apb_frq); ++ return 0; ++} ++ ++static int aspeed_jtag_freq_set_26xx(struct jtag *jtag, u32 freq) ++{ ++ struct aspeed_jtag *aspeed_jtag = jtag_priv(jtag); ++ unsigned long apb_frq; ++ u32 tck_val; ++ u16 div; ++ ++ if (!freq) ++ return -EINVAL; ++ ++ apb_frq = clk_get_rate(aspeed_jtag->pclk); ++ if (!apb_frq) ++ return -EOPNOTSUPP; ++ ++ div = (apb_frq - 1) / freq; ++ tck_val = aspeed_jtag_read(aspeed_jtag, ASPEED_JTAG_GBLCTRL); ++ aspeed_jtag_write(aspeed_jtag, ++ (tck_val & ~ASPEED_JTAG_CLK_DIVISOR_MASK) | div, ++ ASPEED_JTAG_GBLCTRL); ++ aspeed_jtag->tck_period = ++ DIV_ROUND_UP_ULL((u64)NSEC_PER_SEC * (div + 1), apb_frq); ++ return 0; ++} ++ ++static int aspeed_jtag_freq_get(struct jtag *jtag, u32 *frq) ++{ ++ struct aspeed_jtag *aspeed_jtag = jtag_priv(jtag); ++ u32 pclk; ++ u32 tck; ++ ++ pclk = clk_get_rate(aspeed_jtag->pclk); ++ tck = aspeed_jtag_read(aspeed_jtag, ASPEED_JTAG_TCK); ++ *frq = pclk / (ASPEED_JTAG_TCK_GET_DIV(tck) + 1); ++ ++ return 0; ++} ++ ++static int aspeed_jtag_freq_get_26xx(struct jtag *jtag, u32 *frq) ++{ ++ struct aspeed_jtag *aspeed_jtag = jtag_priv(jtag); ++ u32 pclk; ++ u32 tck; ++ ++ pclk = clk_get_rate(aspeed_jtag->pclk); ++ tck = aspeed_jtag_read(aspeed_jtag, 
ASPEED_JTAG_GBLCTRL); ++ *frq = pclk / (ASPEED_JTAG_CLK_GET_DIV(tck) + 1); ++ ++ return 0; ++} ++ ++static inline void aspeed_jtag_output_disable(struct aspeed_jtag *aspeed_jtag) ++{ ++ aspeed_jtag_write(aspeed_jtag, 0, ASPEED_JTAG_CTRL); ++} ++ ++static inline void ++aspeed_jtag_output_disable_26xx(struct aspeed_jtag *aspeed_jtag) ++{ ++ u32 reg_val; ++ ++ reg_val = aspeed_jtag_read(aspeed_jtag, ASPEED_JTAG_GBLCTRL) & ++ ASPEED_JTAG_CLK_DIVISOR_MASK; ++ aspeed_jtag_write(aspeed_jtag, 0, ASPEED_JTAG_CTRL); ++ aspeed_jtag_write(aspeed_jtag, reg_val, ASPEED_JTAG_GBLCTRL); ++} ++ ++static inline void aspeed_jtag_master(struct aspeed_jtag *aspeed_jtag) ++{ ++ aspeed_jtag_write(aspeed_jtag, ++ (ASPEED_JTAG_CTL_ENG_EN | ASPEED_JTAG_CTL_ENG_OUT_EN), ++ ASPEED_JTAG_CTRL); ++ ++ aspeed_jtag_write(aspeed_jtag, ++ ASPEED_JTAG_SW_MODE_EN | ASPEED_JTAG_SW_MODE_TDIO, ++ ASPEED_JTAG_SW); ++ aspeed_jtag_write(aspeed_jtag, ++ ASPEED_JTAG_ISR_INST_PAUSE | ++ ASPEED_JTAG_ISR_INST_COMPLETE | ++ ASPEED_JTAG_ISR_DATA_PAUSE | ++ ASPEED_JTAG_ISR_DATA_COMPLETE | ++ ASPEED_JTAG_ISR_INST_PAUSE_EN | ++ ASPEED_JTAG_ISR_INST_COMPLETE_EN | ++ ASPEED_JTAG_ISR_DATA_PAUSE_EN | ++ ASPEED_JTAG_ISR_DATA_COMPLETE_EN, ++ ASPEED_JTAG_ISR); /* Enable Interrupt */ ++} ++ ++static inline void aspeed_jtag_master_26xx(struct aspeed_jtag *aspeed_jtag) ++{ ++ u32 reg_val; ++ ++ reg_val = aspeed_jtag_read(aspeed_jtag, ASPEED_JTAG_GBLCTRL) & ++ ASPEED_JTAG_CLK_DIVISOR_MASK; ++ if (aspeed_jtag->mode & JTAG_XFER_HW_MODE) { ++ aspeed_jtag_write(aspeed_jtag, 0, ASPEED_JTAG_CTRL); ++ aspeed_jtag_write(aspeed_jtag, 0, ASPEED_JTAG_SW); ++ } else { ++ aspeed_jtag_write(aspeed_jtag, ++ ASPEED_JTAG_SW_MODE_EN | ++ ASPEED_JTAG_SW_MODE_TDIO, ++ ASPEED_JTAG_SW); ++ } ++ /* ++ * For the software mode, it's still necessary to enable out_en and ++ * select the out_en in the hw2 register to maintain control of the ++ * TRST bit same as hw2. ++ */ ++ aspeed_jtag_write(aspeed_jtag, ++ reg_val | ASPEED_JTAG_GBLCTRL_ENG_MODE_EN | ++ ASPEED_JTAG_GBLCTRL_ENG_OUT_EN | ++ ASPEED_JTAG_GBLCTRL_TRST, ++ ASPEED_JTAG_GBLCTRL); ++ aspeed_jtag_write(aspeed_jtag, ++ ASPEED_JTAG_INTCTRL_SHCPL_IRQ_EN | ++ ASPEED_JTAG_INTCTRL_SHCPL_IRQ_STAT, ++ ASPEED_JTAG_INTCTRL); /* Enable HW2 IRQ */ ++ ++ aspeed_jtag_write(aspeed_jtag, ++ ASPEED_JTAG_ISR_INST_PAUSE | ++ ASPEED_JTAG_ISR_INST_COMPLETE | ++ ASPEED_JTAG_ISR_DATA_PAUSE | ++ ASPEED_JTAG_ISR_DATA_COMPLETE | ++ ASPEED_JTAG_ISR_INST_PAUSE_EN | ++ ASPEED_JTAG_ISR_INST_COMPLETE_EN | ++ ASPEED_JTAG_ISR_DATA_PAUSE_EN | ++ ASPEED_JTAG_ISR_DATA_COMPLETE_EN, ++ ASPEED_JTAG_ISR); /* Enable HW1 Interrupts */ ++} ++ ++static int aspeed_jtag_mode_set(struct jtag *jtag, struct jtag_mode *jtag_mode) ++{ ++ struct aspeed_jtag *aspeed_jtag = jtag_priv(jtag); ++ ++ switch (jtag_mode->feature) { ++ case JTAG_XFER_MODE: ++ aspeed_jtag->mode = jtag_mode->mode; ++ aspeed_jtag->llops->master_enable(aspeed_jtag); ++ break; ++ case JTAG_CONTROL_MODE: ++ if (jtag_mode->mode == JTAG_CONTROLLER_OUTPUT_DISABLE) ++ aspeed_jtag->llops->output_disable(aspeed_jtag); ++ else if (jtag_mode->mode == JTAG_CONTROLLER_MODE) ++ aspeed_jtag->llops->master_enable(aspeed_jtag); ++ break; ++ default: ++ return -EINVAL; ++ } ++ return 0; ++} ++ ++/* ++ * We read and write from an unused JTAG Master controller register in SW ++ * mode to create a delay in xfers. ++ * We found this mechanism better than any udelay or usleep option. 
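++ * The helper below simply reads ASPEED_JTAG_PADCTRL1 and writes the same
++ * value back; the extra register round trips are what provide the delay.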
++ */ ++static inline void aspeed_jtag_sw_delay_26xx(struct aspeed_jtag *aspeed_jtag) ++{ ++ u32 read_reg = aspeed_jtag_read(aspeed_jtag, ASPEED_JTAG_PADCTRL1); ++ ++ aspeed_jtag_write(aspeed_jtag, read_reg, ASPEED_JTAG_PADCTRL1); ++} ++ ++static char aspeed_jtag_tck_cycle(struct aspeed_jtag *aspeed_jtag, u8 tms, ++ u8 tdi) ++{ ++ char tdo = 0; ++ ++ /* TCK = 0 */ ++ aspeed_jtag_write(aspeed_jtag, ++ ASPEED_JTAG_SW_MODE_EN | ++ (tms * ASPEED_JTAG_SW_MODE_TMS) | ++ (tdi * ASPEED_JTAG_SW_MODE_TDIO), ++ ASPEED_JTAG_SW); ++ ++ /* Wait until JTAG Master controller finishes the operation */ ++ if (aspeed_jtag->llops->xfer_sw_delay) ++ aspeed_jtag->llops->xfer_sw_delay(aspeed_jtag); ++ else ++ aspeed_jtag_read(aspeed_jtag, ASPEED_JTAG_SW); ++ ++ ndelay(aspeed_jtag->tck_period >> 1); ++ ++ /* TCK = 1 */ ++ aspeed_jtag_write(aspeed_jtag, ++ ASPEED_JTAG_SW_MODE_EN | ASPEED_JTAG_SW_MODE_TCK | ++ (tms * ASPEED_JTAG_SW_MODE_TMS) | ++ (tdi * ASPEED_JTAG_SW_MODE_TDIO), ++ ASPEED_JTAG_SW); ++ ++ /* Wait until JTAG Master controller finishes the operation */ ++ if (aspeed_jtag->llops->xfer_sw_delay) ++ aspeed_jtag->llops->xfer_sw_delay(aspeed_jtag); ++ ++ ndelay(aspeed_jtag->tck_period >> 1); ++ ++ if (aspeed_jtag_read(aspeed_jtag, ASPEED_JTAG_SW) & ++ ASPEED_JTAG_SW_MODE_TDIO) ++ tdo = 1; ++ ++ return tdo; ++} ++ ++static int aspeed_jtag_bitbang(struct jtag *jtag, ++ struct bitbang_packet *bitbang, ++ struct tck_bitbang *bitbang_data) ++{ ++ struct aspeed_jtag *aspeed_jtag = jtag_priv(jtag); ++ int i = 0; ++ ++ for (i = 0; i < bitbang->length; i++) { ++ bitbang_data[i].tdo = ++ aspeed_jtag_tck_cycle(aspeed_jtag, bitbang_data[i].tms, ++ bitbang_data[i].tdi); ++ } ++ return 0; ++} ++ ++static inline void aspeed_jtag_xfer_hw_fifo_delay_26xx(void) ++{ ++ udelay(AST26XX_FIFO_UDELAY); ++} ++ ++static int aspeed_jtag_isr_wait(struct aspeed_jtag *aspeed_jtag, u32 bit) ++{ ++ int res = 0; ++ u32 status = 0; ++ u32 iterations = 0; ++ ++ if (!aspeed_jtag->irq) { ++ res = wait_event_interruptible(aspeed_jtag->jtag_wq, ++ aspeed_jtag->flag & bit); ++ aspeed_jtag->flag &= ~bit; ++ } else { ++ while ((status & bit) == 0) { ++ status = aspeed_jtag_read(aspeed_jtag, ASPEED_JTAG_ISR); ++#ifdef DEBUG_JTAG ++ dev_dbg(aspeed_jtag->dev, "%s = 0x%08x\n", __func__, ++ status); ++#endif ++ iterations++; ++ if (iterations > WAIT_ITERATIONS) { ++ dev_err(aspeed_jtag->dev, ++ "%s %d in ASPEED_JTAG_ISR\n", ++ "aspeed_jtag driver timed out waiting for bit", ++ bit); ++ res = -EFAULT; ++ break; ++ } ++ if ((status & ASPEED_JTAG_ISR_DATA_COMPLETE) == 0) { ++ if (iterations % 25 == 0) ++ usleep_range(1, 5); ++ else ++ udelay(1); ++ } ++ } ++ aspeed_jtag_write(aspeed_jtag, bit | (status & 0xf), ++ ASPEED_JTAG_ISR); ++ } ++ return res; ++} ++ ++static int aspeed_jtag_wait_shift_complete(struct aspeed_jtag *aspeed_jtag) ++{ ++ int res = 0; ++ u32 status = 0; ++ u32 iterations = 0; ++ ++ if (!aspeed_jtag->irq) { ++ res = wait_event_interruptible(aspeed_jtag->jtag_wq, ++ aspeed_jtag->flag & ++ ASPEED_JTAG_INTCTRL_SHCPL_IRQ_STAT); ++ aspeed_jtag->flag &= ~ASPEED_JTAG_INTCTRL_SHCPL_IRQ_STAT; ++ } else { ++ while ((status & ASPEED_JTAG_INTCTRL_SHCPL_IRQ_STAT) == 0) { ++ status = aspeed_jtag_read(aspeed_jtag, ++ ASPEED_JTAG_INTCTRL); ++#ifdef DEBUG_JTAG ++ dev_dbg(aspeed_jtag->dev, "%s = 0x%08x\n", __func__, ++ status); ++#endif ++ iterations++; ++ if (iterations > WAIT_ITERATIONS) { ++ dev_err(aspeed_jtag->dev, ++ "aspeed_jtag driver timed out waiting for shift completed\n"); ++ res = -EFAULT; ++ break; ++ } ++ if (iterations % 25 == 0) ++ 
usleep_range(1, 5); ++ else ++ udelay(1); ++ } ++ aspeed_jtag_write(aspeed_jtag, ++ ASPEED_JTAG_INTCTRL_SHCPL_IRQ_STAT | ++ ASPEED_JTAG_INTCTRL_SHCPL_IRQ_EN, ++ ASPEED_JTAG_INTCTRL); ++ } ++ ++ return res; ++} ++ ++static void aspeed_jtag_set_tap_state(struct aspeed_jtag *aspeed_jtag, ++ enum jtag_tapstate from_state, ++ enum jtag_tapstate end_state) ++{ ++ int i = 0; ++ enum jtag_tapstate from, to; ++ ++ from = from_state; ++ to = end_state; ++ ++ for (i = 0; i < _tms_cycle_lookup[from][to].count; i++) ++ aspeed_jtag_tck_cycle(aspeed_jtag, ++ ((_tms_cycle_lookup[from][to].tmsbits ++ >> i) & 0x1), 0); ++ aspeed_jtag->current_state = end_state; ++} ++ ++static void aspeed_jtag_set_tap_state_sw(struct aspeed_jtag *aspeed_jtag, ++ struct jtag_tap_state *tapstate) ++{ ++ int i; ++ ++ /* SW mode from curent tap state -> to end_state */ ++ if (tapstate->reset || tapstate->endstate == JTAG_STATE_TLRESET) { ++ for (i = 0; i < ASPEED_JTAG_RESET_CNTR; i++) ++ aspeed_jtag_tck_cycle(aspeed_jtag, 1, 0); ++ aspeed_jtag->current_state = JTAG_STATE_TLRESET; ++ } ++ ++ aspeed_jtag_set_tap_state(aspeed_jtag, tapstate->from, ++ tapstate->endstate); ++ if (tapstate->endstate == JTAG_STATE_TLRESET || ++ tapstate->endstate == JTAG_STATE_IDLE || ++ tapstate->endstate == JTAG_STATE_PAUSEDR || ++ tapstate->endstate == JTAG_STATE_PAUSEIR) ++ for (i = 0; i < tapstate->tck; i++) ++ aspeed_jtag_tck_cycle(aspeed_jtag, 0, 0); ++} ++ ++static int aspeed_jtag_status_set(struct jtag *jtag, ++ struct jtag_tap_state *tapstate) ++{ ++ struct aspeed_jtag *aspeed_jtag = jtag_priv(jtag); ++ int i; ++ ++ if (tapstate->from == JTAG_STATE_CURRENT) ++ tapstate->from = aspeed_jtag->current_state; ++ if (tapstate->endstate == JTAG_STATE_CURRENT) ++ tapstate->endstate = aspeed_jtag->current_state; ++#ifdef DEBUG_JTAG ++ dev_dbg(aspeed_jtag->dev, "Set TAP state: %s\n", ++ end_status_str[tapstate->endstate]); ++#endif ++ ++ if (!(aspeed_jtag->mode & JTAG_XFER_HW_MODE)) { ++ aspeed_jtag_set_tap_state_sw(aspeed_jtag, tapstate); ++ return 0; ++ } ++ ++ /* x TMS high + 1 TMS low */ ++ if (tapstate->reset) { ++ /* Disable sw mode */ ++ aspeed_jtag_write(aspeed_jtag, 0, ASPEED_JTAG_SW); ++ mdelay(1); ++ aspeed_jtag_write(aspeed_jtag, ++ ASPEED_JTAG_CTL_ENG_EN | ++ ASPEED_JTAG_CTL_ENG_OUT_EN | ++ ASPEED_JTAG_CTL_FORCE_TMS, ++ ASPEED_JTAG_CTRL); ++ mdelay(1); ++ aspeed_jtag_write(aspeed_jtag, ASPEED_JTAG_SW_TDIO, ++ ASPEED_JTAG_SW); ++ aspeed_jtag->current_state = JTAG_STATE_TLRESET; ++ } ++ for (i = 0; i < tapstate->tck; i++) ++ ndelay(aspeed_jtag->tck_period); ++ ++ return 0; ++} ++ ++static int aspeed_jtag_shctrl_tms_mask(enum jtag_tapstate from, ++ enum jtag_tapstate to, ++ enum jtag_tapstate there, ++ enum jtag_tapstate endstate, ++ u32 start_shift, u32 end_shift, ++ u32 *tms_mask) ++{ ++ u32 pre_tms = start_shift ? _tms_cycle_lookup[from][to].count : 0; ++ u32 post_tms = end_shift ? _tms_cycle_lookup[there][endstate].count : 0; ++ u32 tms_value = start_shift ? _tms_cycle_lookup[from][to].tmsbits : 0; ++ ++ tms_value |= end_shift ? 
_tms_cycle_lookup[there][endstate].tmsbits ++ << pre_tms : ++ 0; ++ if (pre_tms > GENMASK(2, 0) || post_tms > GENMASK(2, 0)) { ++ pr_err("pre/port tms count is greater than hw limit"); ++ return -EINVAL; ++ } ++ *tms_mask = start_shift | ASPEED_JTAG_SHCTRL_PRE_TMS(pre_tms) | ++ end_shift | ASPEED_JTAG_SHCTRL_POST_TMS(post_tms) | ++ ASPEED_JTAG_SHCTRL_TMS(tms_value); ++ return 0; ++} ++ ++static void aspeed_jtag_set_tap_state_hw2(struct aspeed_jtag *aspeed_jtag, ++ struct jtag_tap_state *tapstate) ++{ ++ u32 reg_val, execute_tck; ++ u32 tck = tapstate->tck; ++ ++ /* x TMS high + 1 TMS low */ ++ if (tapstate->reset || tapstate->endstate == JTAG_STATE_TLRESET) { ++ /* Disable sw mode */ ++ aspeed_jtag_write(aspeed_jtag, 0, ASPEED_JTAG_SW); ++ udelay(AST26XX_JTAG_CTRL_UDELAY); ++ reg_val = aspeed_jtag_read(aspeed_jtag, ASPEED_JTAG_GBLCTRL); ++ aspeed_jtag_write(aspeed_jtag, ++ reg_val | ASPEED_JTAG_GBLCTRL_ENG_MODE_EN | ++ ASPEED_JTAG_GBLCTRL_ENG_OUT_EN | ++ ASPEED_JTAG_GBLCTRL_RESET_FIFO | ++ ASPEED_JTAG_GBLCTRL_FORCE_TMS, ++ ASPEED_JTAG_GBLCTRL); ++ udelay(AST26XX_JTAG_CTRL_UDELAY); ++ while (aspeed_jtag_read(aspeed_jtag, ASPEED_JTAG_GBLCTRL) & ASPEED_JTAG_GBLCTRL_FORCE_TMS) ++ ; ++ aspeed_jtag->current_state = JTAG_STATE_TLRESET; ++ } else { ++ aspeed_jtag_set_tap_state(aspeed_jtag, ++ aspeed_jtag->current_state, ++ tapstate->endstate); ++ } ++ /* Run TCK */ ++ while (tck) { ++ execute_tck = tck > GENMASK(9, 0) ? GENMASK(9, 0) : tck; ++ /* Disable sw mode */ ++ aspeed_jtag_write(aspeed_jtag, 0, ASPEED_JTAG_SW); ++ aspeed_jtag_write(aspeed_jtag, 0, ASPEED_JTAG_PADCTRL0); ++ reg_val = aspeed_jtag_read(aspeed_jtag, ASPEED_JTAG_GBLCTRL); ++ reg_val = reg_val & ~(GENMASK(22, 20)); ++ aspeed_jtag_write(aspeed_jtag, ++ reg_val | ASPEED_JTAG_GBLCTRL_FIFO_CTRL_MODE | ++ ASPEED_JTAG_GBLCTRL_STSHIFT(0) | ++ ASPEED_JTAG_GBLCTRL_UPDT_SHIFT(execute_tck), ++ ASPEED_JTAG_GBLCTRL); ++ ++ aspeed_jtag_write(aspeed_jtag, ++ ASPEED_JTAG_SHCTRL_STSHIFT_EN | ++ ASPEED_JTAG_SHCTRL_LWRDT_SHIFT(execute_tck), ++ ASPEED_JTAG_SHCTRL); ++ aspeed_jtag_wait_shift_complete(aspeed_jtag); ++ tck -= execute_tck; ++ } ++} ++ ++static int aspeed_jtag_status_set_26xx(struct jtag *jtag, ++ struct jtag_tap_state *tapstate) ++{ ++ struct aspeed_jtag *aspeed_jtag = jtag_priv(jtag); ++ ++ if (tapstate->from == JTAG_STATE_CURRENT) ++ tapstate->from = aspeed_jtag->current_state; ++ if (tapstate->endstate == JTAG_STATE_CURRENT) ++ tapstate->endstate = aspeed_jtag->current_state; ++#ifdef DEBUG_JTAG ++ dev_dbg(aspeed_jtag->dev, "Set TAP state: status %s from %s to %s\n", ++ end_status_str[aspeed_jtag->current_state], ++ end_status_str[tapstate->from], ++ end_status_str[tapstate->endstate]); ++#endif ++ ++ if (!(aspeed_jtag->mode & JTAG_XFER_HW_MODE)) { ++ aspeed_jtag_set_tap_state_sw(aspeed_jtag, tapstate); ++ return 0; ++ } ++ ++ aspeed_jtag_set_tap_state_hw2(aspeed_jtag, tapstate); ++ return 0; ++} ++ ++static void aspeed_jtag_xfer_sw(struct aspeed_jtag *aspeed_jtag, ++ struct jtag_xfer *xfer, u32 *data) ++{ ++ unsigned long remain_xfer = xfer->length; ++ unsigned long shift_bits = 0; ++ unsigned long index = 0; ++ unsigned long tdi; ++ char tdo; ++ ++#ifdef DEBUG_JTAG ++ dev_dbg(aspeed_jtag->dev, "SW JTAG SHIFT %s, length = %d\n", ++ (xfer->type == JTAG_SIR_XFER) ? 
"IR" : "DR", xfer->length); ++#endif ++ ++ if (xfer->type == JTAG_SIR_XFER) ++ aspeed_jtag_set_tap_state(aspeed_jtag, xfer->from, ++ JTAG_STATE_SHIFTIR); ++ else ++ aspeed_jtag_set_tap_state(aspeed_jtag, xfer->from, ++ JTAG_STATE_SHIFTDR); ++ ++ tdi = ASPEED_JTAG_GET_TDI(xfer->direction, data[index]); ++ data[index] = 0; ++ while (remain_xfer > 1) { ++ tdo = aspeed_jtag_tck_cycle(aspeed_jtag, 0, ++ tdi & ASPEED_JTAG_DATA_MSB); ++ data[index] |= tdo ++ << (shift_bits % ASPEED_JTAG_DATA_CHUNK_SIZE); ++ tdi >>= 1; ++ shift_bits++; ++ remain_xfer--; ++ ++ if (shift_bits % ASPEED_JTAG_DATA_CHUNK_SIZE == 0) { ++ tdo = 0; ++ index++; ++ tdi = ASPEED_JTAG_GET_TDI(xfer->direction, data[index]); ++ data[index] = 0; ++ } ++ } ++ ++ if ((xfer->endstate == (xfer->type == JTAG_SIR_XFER ? ++ JTAG_STATE_SHIFTIR : ++ JTAG_STATE_SHIFTDR))) { ++ /* Stay in Shift IR/DR*/ ++ tdo = aspeed_jtag_tck_cycle(aspeed_jtag, 0, ++ tdi & ASPEED_JTAG_DATA_MSB); ++ data[index] |= tdo ++ << (shift_bits % ASPEED_JTAG_DATA_CHUNK_SIZE); ++ } else { ++ /* Goto end state */ ++ tdo = aspeed_jtag_tck_cycle(aspeed_jtag, 1, ++ tdi & ASPEED_JTAG_DATA_MSB); ++ data[index] |= tdo ++ << (shift_bits % ASPEED_JTAG_DATA_CHUNK_SIZE); ++ aspeed_jtag->status = (xfer->type == JTAG_SIR_XFER) ? ++ JTAG_STATE_EXIT1IR : ++ JTAG_STATE_EXIT1DR; ++ aspeed_jtag_set_tap_state(aspeed_jtag, aspeed_jtag->status, ++ xfer->endstate); ++ } ++} ++ ++static int aspeed_jtag_xfer_push_data_26xx(struct aspeed_jtag *aspeed_jtag, ++ enum jtag_xfer_type type, ++ u32 bits_len) ++{ ++ int res = 0; ++ ++ aspeed_jtag_write(aspeed_jtag, ASPEED_JTAG_TRANS_LEN(bits_len), ++ ASPEED_JTAG_CTRL); ++ if (type == JTAG_SIR_XFER) { ++ aspeed_jtag_write(aspeed_jtag, ++ ASPEED_JTAG_TRANS_LEN(bits_len) | ++ ASPEED_JTAG_CTL_26XX_INST_EN, ++ ASPEED_JTAG_CTRL); ++ res = aspeed_jtag_isr_wait(aspeed_jtag, ++ ASPEED_JTAG_ISR_INST_PAUSE); ++ } else { ++ aspeed_jtag_write(aspeed_jtag, ++ ASPEED_JTAG_TRANS_LEN(bits_len) | ++ ASPEED_JTAG_CTL_DATA_EN, ++ ASPEED_JTAG_CTRL); ++ res = aspeed_jtag_isr_wait(aspeed_jtag, ++ ASPEED_JTAG_ISR_DATA_PAUSE); ++ } ++ return res; ++} ++ ++static int aspeed_jtag_xfer_push_data(struct aspeed_jtag *aspeed_jtag, ++ enum jtag_xfer_type type, u32 bits_len) ++{ ++ int res = 0; ++ ++ if (type == JTAG_SIR_XFER) { ++ aspeed_jtag_write(aspeed_jtag, ASPEED_JTAG_IOUT_LEN(bits_len), ++ ASPEED_JTAG_CTRL); ++ aspeed_jtag_write(aspeed_jtag, ++ ASPEED_JTAG_IOUT_LEN(bits_len) | ++ ASPEED_JTAG_CTL_INST_EN, ++ ASPEED_JTAG_CTRL); ++ res = aspeed_jtag_isr_wait(aspeed_jtag, ++ ASPEED_JTAG_ISR_INST_PAUSE); ++ } else { ++ aspeed_jtag_write(aspeed_jtag, ASPEED_JTAG_DOUT_LEN(bits_len), ++ ASPEED_JTAG_CTRL); ++ aspeed_jtag_write(aspeed_jtag, ++ ASPEED_JTAG_DOUT_LEN(bits_len) | ++ ASPEED_JTAG_CTL_DATA_EN, ++ ASPEED_JTAG_CTRL); ++ res = aspeed_jtag_isr_wait(aspeed_jtag, ++ ASPEED_JTAG_ISR_DATA_PAUSE); ++ } ++ return res; ++} ++ ++static int aspeed_jtag_xfer_push_data_last_26xx(struct aspeed_jtag *aspeed_jtag, ++ enum jtag_xfer_type type, ++ u32 shift_bits) ++{ ++ int res = 0; ++ ++ aspeed_jtag_write(aspeed_jtag, ++ ASPEED_JTAG_TRANS_LEN(shift_bits) | ++ ASPEED_JTAG_CTL_26XX_LASPEED_TRANS, ++ ASPEED_JTAG_CTRL); ++ if (type == JTAG_SIR_XFER) { ++ aspeed_jtag_write(aspeed_jtag, ++ ASPEED_JTAG_TRANS_LEN(shift_bits) | ++ ASPEED_JTAG_CTL_26XX_LASPEED_TRANS | ++ ASPEED_JTAG_CTL_26XX_INST_EN, ++ ASPEED_JTAG_CTRL); ++ res = aspeed_jtag_isr_wait(aspeed_jtag, ++ ASPEED_JTAG_ISR_INST_COMPLETE); ++ } else { ++ aspeed_jtag_write(aspeed_jtag, ++ ASPEED_JTAG_TRANS_LEN(shift_bits) | ++ 
ASPEED_JTAG_CTL_26XX_LASPEED_TRANS | ++ ASPEED_JTAG_CTL_DATA_EN, ++ ASPEED_JTAG_CTRL); ++ res = aspeed_jtag_isr_wait(aspeed_jtag, ++ ASPEED_JTAG_ISR_DATA_COMPLETE); ++ } ++ return res; ++} ++ ++static int aspeed_jtag_xfer_push_data_last(struct aspeed_jtag *aspeed_jtag, ++ enum jtag_xfer_type type, ++ u32 shift_bits) ++{ ++ int res = 0; ++ ++ if (type == JTAG_SIR_XFER) { ++ aspeed_jtag_write(aspeed_jtag, ++ ASPEED_JTAG_IOUT_LEN(shift_bits) | ++ ASPEED_JTAG_CTL_LASPEED_INST, ++ ASPEED_JTAG_CTRL); ++ aspeed_jtag_write(aspeed_jtag, ++ ASPEED_JTAG_IOUT_LEN(shift_bits) | ++ ASPEED_JTAG_CTL_LASPEED_INST | ++ ASPEED_JTAG_CTL_INST_EN, ++ ASPEED_JTAG_CTRL); ++ res = aspeed_jtag_isr_wait(aspeed_jtag, ++ ASPEED_JTAG_ISR_INST_COMPLETE); ++ } else { ++ aspeed_jtag_write(aspeed_jtag, ++ ASPEED_JTAG_DOUT_LEN(shift_bits) | ++ ASPEED_JTAG_CTL_LASPEED_DATA, ++ ASPEED_JTAG_CTRL); ++ aspeed_jtag_write(aspeed_jtag, ++ ASPEED_JTAG_DOUT_LEN(shift_bits) | ++ ASPEED_JTAG_CTL_LASPEED_DATA | ++ ASPEED_JTAG_CTL_DATA_EN, ++ ASPEED_JTAG_CTRL); ++ res = aspeed_jtag_isr_wait(aspeed_jtag, ++ ASPEED_JTAG_ISR_DATA_COMPLETE); ++ } ++ return res; ++} ++ ++static int aspeed_jtag_xfer_hw(struct aspeed_jtag *aspeed_jtag, ++ struct jtag_xfer *xfer, u32 *data) ++{ ++ unsigned long remain_xfer = xfer->length; ++ unsigned long index = 0; ++ char shift_bits; ++ u32 data_reg; ++ u32 scan_end; ++ union pad_config padding; ++ int retval = 0; ++ ++ padding.int_value = xfer->padding; ++ ++#ifdef DEBUG_JTAG ++ dev_dbg(aspeed_jtag->dev, "HW JTAG SHIFT %s, length = %d pad = 0x%x\n", ++ (xfer->type == JTAG_SIR_XFER) ? "IR" : "DR", xfer->length, ++ xfer->padding); ++#endif ++ data_reg = xfer->type == JTAG_SIR_XFER ? ASPEED_JTAG_INST : ++ ASPEED_JTAG_DATA; ++ if (xfer->endstate == JTAG_STATE_SHIFTIR || ++ xfer->endstate == JTAG_STATE_SHIFTDR || ++ xfer->endstate == JTAG_STATE_PAUSEIR || ++ xfer->endstate == JTAG_STATE_PAUSEDR) { ++ scan_end = 0; ++ } else { ++ if (padding.post_pad_number) ++ scan_end = 0; ++ else ++ scan_end = 1; ++ } ++ ++ /* Perform pre padding */ ++ if (padding.pre_pad_number) { ++ struct jtag_xfer pre_xfer = { ++ .type = xfer->type, ++ .direction = JTAG_WRITE_XFER, ++ .from = xfer->from, ++ .endstate = xfer->type == JTAG_SIR_XFER ? ++ JTAG_STATE_SHIFTIR : JTAG_STATE_SHIFTDR, ++ .padding = 0, ++ .length = padding.pre_pad_number, ++ }; ++ if (padding.pre_pad_number > ASPEED_JTAG_MAX_PAD_SIZE) ++ return -EINVAL; ++ retval = aspeed_jtag_xfer_hw(aspeed_jtag, &pre_xfer, ++ padding.pad_data ? ++ aspeed_jtag->pad_data_one : ++ aspeed_jtag->pad_data_zero); ++ if (retval) ++ return retval; ++ } ++ ++ while (remain_xfer) { ++ if (xfer->direction & JTAG_WRITE_XFER) ++ aspeed_jtag_write(aspeed_jtag, data[index], data_reg); ++ else ++ aspeed_jtag_write(aspeed_jtag, 0, data_reg); ++ if (aspeed_jtag->llops->xfer_hw_fifo_delay) ++ aspeed_jtag->llops->xfer_hw_fifo_delay(); ++ ++ if (remain_xfer > ASPEED_JTAG_DATA_CHUNK_SIZE) { ++#ifdef DEBUG_JTAG ++ dev_dbg(aspeed_jtag->dev, ++ "Chunk len=%d chunk_size=%d remain_xfer=%lu\n", ++ xfer->length, ASPEED_JTAG_DATA_CHUNK_SIZE, ++ remain_xfer); ++#endif ++ shift_bits = ASPEED_JTAG_DATA_CHUNK_SIZE; ++ ++ /* ++ * Transmit bytes that were not equals to column length ++ * and after the transfer go to Pause IR/DR. 
++ */ ++ if (aspeed_jtag->llops->xfer_push_data(aspeed_jtag, ++ xfer->type, ++ shift_bits) ++ != 0) { ++ return -EFAULT; ++ } ++ } else { ++ /* ++ * Read bytes equals to column length ++ */ ++ shift_bits = remain_xfer; ++ if (scan_end) { ++ /* ++ * If this data is the end of the transmission ++ * send remaining bits and go to endstate ++ */ ++#ifdef DEBUG_JTAG ++ dev_dbg(aspeed_jtag->dev, ++ "Last len=%d chunk_size=%d remain_xfer=%lu\n", ++ xfer->length, ++ ASPEED_JTAG_DATA_CHUNK_SIZE, ++ remain_xfer); ++#endif ++ if (aspeed_jtag->llops->xfer_push_data_last( ++ aspeed_jtag, xfer->type, ++ shift_bits) != 0) { ++ return -EFAULT; ++ } ++ } else { ++ /* ++ * If transmission is waiting for additional ++ * data send remaining bits and then go to ++ * Pause IR/DR. ++ */ ++#ifdef DEBUG_JTAG ++ dev_dbg(aspeed_jtag->dev, ++ "Tail len=%d chunk_size=%d remain_xfer=%lu\n", ++ xfer->length, ++ ASPEED_JTAG_DATA_CHUNK_SIZE, ++ remain_xfer); ++#endif ++ if (aspeed_jtag->llops->xfer_push_data( ++ aspeed_jtag, xfer->type, ++ shift_bits) != 0) { ++ return -EFAULT; ++ } ++ } ++ } ++ ++ if (xfer->direction & JTAG_READ_XFER) { ++ if (shift_bits < ASPEED_JTAG_DATA_CHUNK_SIZE) { ++ data[index] = ++ aspeed_jtag_read(aspeed_jtag, data_reg); ++ ++ data[index] >>= ASPEED_JTAG_DATA_CHUNK_SIZE - ++ shift_bits; ++ } else { ++ data[index] = ++ aspeed_jtag_read(aspeed_jtag, data_reg); ++ } ++ if (aspeed_jtag->llops->xfer_hw_fifo_delay) ++ aspeed_jtag->llops->xfer_hw_fifo_delay(); ++ } ++ ++ remain_xfer = remain_xfer - shift_bits; ++ index++; ++ } ++ ++ /* Perform post padding */ ++ if (padding.post_pad_number) { ++ struct jtag_xfer post_xfer = { ++ .type = xfer->type, ++ .direction = JTAG_WRITE_XFER, ++ .from = xfer->from, ++ .endstate = xfer->endstate, ++ .padding = 0, ++ .length = padding.post_pad_number, ++ }; ++ if (padding.post_pad_number > ASPEED_JTAG_MAX_PAD_SIZE) ++ return -EINVAL; ++ retval = aspeed_jtag_xfer_hw(aspeed_jtag, &post_xfer, ++ padding.pad_data ? 
++ aspeed_jtag->pad_data_one : ++ aspeed_jtag->pad_data_zero); ++ if (retval) ++ return retval; ++ } ++ return 0; ++} ++ ++static int aspeed_jtag_xfer(struct jtag *jtag, struct jtag_xfer *xfer, ++ u8 *xfer_data) ++{ ++ struct aspeed_jtag *aspeed_jtag = jtag_priv(jtag); ++ ++ if (!(aspeed_jtag->mode & JTAG_XFER_HW_MODE)) { ++ /* SW mode */ ++ aspeed_jtag_write(aspeed_jtag, ASPEED_JTAG_SW_TDIO, ++ ASPEED_JTAG_SW); ++ ++ aspeed_jtag->llops->xfer_sw(aspeed_jtag, xfer, ++ (u32 *)xfer_data); ++ } else { ++ /* HW mode */ ++ aspeed_jtag_write(aspeed_jtag, 0, ASPEED_JTAG_SW); ++ if (aspeed_jtag->llops->xfer_hw(aspeed_jtag, xfer, ++ (u32 *)xfer_data) != 0) ++ return -EFAULT; ++ } ++ ++ aspeed_jtag->status = xfer->endstate; ++ return 0; ++} ++ ++static int aspeed_jtag_xfer_hw2(struct aspeed_jtag *aspeed_jtag, ++ struct jtag_xfer *xfer, u32 *data) ++{ ++ unsigned long remain_xfer = xfer->length; ++ unsigned long partial_xfer_size = 0; ++ unsigned long index = 0; ++ u32 shift_bits; ++ u32 data_reg; ++ u32 reg_val; ++ enum jtag_tapstate shift; ++ enum jtag_tapstate exit; ++ enum jtag_tapstate exitx; ++ enum jtag_tapstate pause; ++ enum jtag_tapstate endstate; ++ u32 start_shift; ++ u32 end_shift; ++ u32 tms_mask; ++ int ret; ++ ++ if (xfer->type == JTAG_SIR_XFER) { ++ data_reg = ASPEED_JTAG_SHDATA; ++ shift = JTAG_STATE_SHIFTIR; ++ pause = JTAG_STATE_PAUSEIR; ++ exit = JTAG_STATE_EXIT1IR; ++ exitx = JTAG_STATE_EXIT1DR; ++ } else { ++ data_reg = ASPEED_JTAG_SHDATA; ++ shift = JTAG_STATE_SHIFTDR; ++ pause = JTAG_STATE_PAUSEDR; ++ exit = JTAG_STATE_EXIT1DR; ++ exitx = JTAG_STATE_EXIT1IR; ++ } ++#ifdef DEBUG_JTAG ++ dev_dbg(aspeed_jtag->dev, ++ "HW2 JTAG SHIFT %s, length %d status %s from %s to %s then %s pad 0x%x\n", ++ (xfer->type == JTAG_SIR_XFER) ? "IR" : "DR", xfer->length, ++ end_status_str[aspeed_jtag->current_state], ++ end_status_str[xfer->from], ++ end_status_str[shift], ++ end_status_str[xfer->endstate], xfer->padding); ++#endif ++ ++ if (aspeed_jtag->current_state == shift) { ++ start_shift = 0; ++ } else { ++ start_shift = ASPEED_JTAG_SHCTRL_START_SHIFT; ++ } ++ ++ if (xfer->endstate == shift) { ++ /* ++ * In the case of shifting 1 bit of data and attempting to stay ++ * in the SHIFT state, the AST2600 JTAG Master Controller in ++ * Hardware mode 2 has been observed to go to EXIT1 IR/DR ++ * instead of staying in the SHIFT IR/DR state. The following ++ * code special cases this one bit shift and directs the state ++ * machine to go to the PAUSE IR/DR state instead. ++ * Alternatively, the application making driver calls can avoid ++ * this situation as follows: ++ * 1.) Bundle all of the shift bits together into one call ++ * AND/OR ++ * 2.) Direct all partial shifts to move to the PAUSE-IR/DR ++ * state. 
++ */ ++ if (xfer->length == 1) { ++#ifdef DEBUG_JTAG ++ dev_warn(aspeed_jtag->dev, "JTAG Silicon WA: going to pause instead of shift"); ++#endif ++ end_shift = ASPEED_JTAG_SHCTRL_END_SHIFT; ++ endstate = pause; ++ } else { ++ end_shift = 0; ++ endstate = shift; ++ } ++ } else { ++ endstate = xfer->endstate; ++ end_shift = ASPEED_JTAG_SHCTRL_END_SHIFT; ++ } ++ ++ aspeed_jtag_write(aspeed_jtag, xfer->padding, ASPEED_JTAG_PADCTRL0); ++ ++ while (remain_xfer) { ++ unsigned long partial_xfer; ++ unsigned long partial_index; ++ ++ if (remain_xfer > ASPEED_JTAG_HW2_DATA_CHUNK_SIZE) ++ partial_xfer_size = ASPEED_JTAG_HW2_DATA_CHUNK_SIZE; ++ else ++ partial_xfer_size = remain_xfer; ++ ++ partial_index = index; ++ partial_xfer = partial_xfer_size; ++ ++ reg_val = aspeed_jtag_read(aspeed_jtag, ASPEED_JTAG_GBLCTRL); ++ aspeed_jtag_write(aspeed_jtag, reg_val | ++ ASPEED_JTAG_GBLCTRL_RESET_FIFO, ++ ASPEED_JTAG_GBLCTRL); ++ ++ /* Switch internal FIFO into CPU mode */ ++ reg_val = reg_val & ~BIT(24); ++ aspeed_jtag_write(aspeed_jtag, reg_val, ++ ASPEED_JTAG_GBLCTRL); ++ ++ while (partial_xfer) { ++ if (partial_xfer > ASPEED_JTAG_DATA_CHUNK_SIZE) ++ shift_bits = ASPEED_JTAG_DATA_CHUNK_SIZE; ++ else ++ shift_bits = partial_xfer; ++ ++ if (xfer->direction & JTAG_WRITE_XFER) ++ aspeed_jtag_write(aspeed_jtag, ++ data[partial_index++], ++ data_reg); ++ else ++ aspeed_jtag_write(aspeed_jtag, 0, data_reg); ++ if (aspeed_jtag->llops->xfer_hw_fifo_delay) ++ aspeed_jtag->llops->xfer_hw_fifo_delay(); ++ partial_xfer = partial_xfer - shift_bits; ++ } ++ if (remain_xfer > ASPEED_JTAG_HW2_DATA_CHUNK_SIZE) { ++ shift_bits = ASPEED_JTAG_HW2_DATA_CHUNK_SIZE; ++ ++ /* ++ * Transmit bytes that were not equals to column length ++ * and after the transfer go to Pause IR/DR. ++ */ ++ ++ ret = aspeed_jtag_shctrl_tms_mask(aspeed_jtag->current_state, shift, exit, ++ endstate, start_shift, 0, &tms_mask); ++ if (ret) ++ return ret; ++ ++ reg_val = aspeed_jtag_read(aspeed_jtag, ++ ASPEED_JTAG_GBLCTRL); ++ reg_val = reg_val & ~(GENMASK(22, 20)); ++ aspeed_jtag_write(aspeed_jtag, reg_val | ++ ASPEED_JTAG_GBLCTRL_FIFO_CTRL_MODE | ++ ASPEED_JTAG_GBLCTRL_UPDT_SHIFT( ++ shift_bits), ++ ASPEED_JTAG_GBLCTRL); ++ ++ aspeed_jtag_write(aspeed_jtag, tms_mask | ++ ASPEED_JTAG_SHCTRL_LWRDT_SHIFT(shift_bits), ++ ASPEED_JTAG_SHCTRL); ++ aspeed_jtag_wait_shift_complete(aspeed_jtag); ++ } else { ++ /* ++ * Read bytes equals to column length ++ */ ++ shift_bits = remain_xfer; ++ ret = aspeed_jtag_shctrl_tms_mask(aspeed_jtag->current_state, shift, exit, ++ endstate, start_shift, end_shift, ++ &tms_mask); ++ if (ret) ++ return ret; ++ ++ reg_val = aspeed_jtag_read(aspeed_jtag, ++ ASPEED_JTAG_GBLCTRL); ++ reg_val = reg_val & ~(GENMASK(22, 20)); ++ aspeed_jtag_write(aspeed_jtag, reg_val | ++ ASPEED_JTAG_GBLCTRL_FIFO_CTRL_MODE | ++ ASPEED_JTAG_GBLCTRL_UPDT_SHIFT( ++ shift_bits), ++ ASPEED_JTAG_GBLCTRL); ++ ++ aspeed_jtag_write(aspeed_jtag, tms_mask | ++ ASPEED_JTAG_SHCTRL_LWRDT_SHIFT( ++ shift_bits), ++ ASPEED_JTAG_SHCTRL); ++ ++ aspeed_jtag_wait_shift_complete(aspeed_jtag); ++ } ++ ++ partial_index = index; ++ partial_xfer = partial_xfer_size; ++ while (partial_xfer) { ++ if (partial_xfer > ++ ASPEED_JTAG_DATA_CHUNK_SIZE) { ++ shift_bits = ++ ASPEED_JTAG_DATA_CHUNK_SIZE; ++ data[partial_index++] = ++ aspeed_jtag_read(aspeed_jtag, ++ data_reg); ++ ++ } else { ++ shift_bits = partial_xfer; ++ data[partial_index++] = ++ aspeed_jtag_read(aspeed_jtag, ++ data_reg); ++ } ++ if (aspeed_jtag->llops->xfer_hw_fifo_delay) ++ aspeed_jtag->llops->xfer_hw_fifo_delay(); 
++ partial_xfer = partial_xfer - shift_bits; ++ } ++ ++ remain_xfer = remain_xfer - partial_xfer_size; ++ index = partial_index; ++ start_shift = 0; ++ } ++ aspeed_jtag->current_state = endstate; ++ return 0; ++} ++ ++static int aspeed_jtag_status_get(struct jtag *jtag, u32 *status) ++{ ++ struct aspeed_jtag *aspeed_jtag = jtag_priv(jtag); ++ ++ *status = aspeed_jtag->current_state; ++ return 0; ++} ++ ++static irqreturn_t aspeed_jtag_interrupt(s32 this_irq, void *dev_id) ++{ ++ struct aspeed_jtag *aspeed_jtag = dev_id; ++ irqreturn_t ret; ++ u32 status; ++ ++ status = aspeed_jtag_read(aspeed_jtag, ASPEED_JTAG_ISR); ++ ++ if (status & ASPEED_JTAG_ISR_INT_MASK) { ++ aspeed_jtag_write(aspeed_jtag, ++ (status & ASPEED_JTAG_ISR_INT_MASK) | ++ (status & ++ ASPEED_JTAG_ISR_INT_EN_MASK), ++ ASPEED_JTAG_ISR); ++ aspeed_jtag->flag |= status & ASPEED_JTAG_ISR_INT_MASK; ++ } ++ ++ if (aspeed_jtag->flag) { ++ wake_up_interruptible(&aspeed_jtag->jtag_wq); ++ ret = IRQ_HANDLED; ++ } else { ++ dev_err(aspeed_jtag->dev, "irq status:%x\n", status); ++ ret = IRQ_NONE; ++ } ++ return ret; ++} ++ ++static irqreturn_t aspeed_jtag_interrupt_hw2(s32 this_irq, void *dev_id) ++{ ++ struct aspeed_jtag *aspeed_jtag = dev_id; ++ irqreturn_t ret; ++ u32 status; ++ ++ status = aspeed_jtag_read(aspeed_jtag, ASPEED_JTAG_INTCTRL); ++ ++ if (status & ASPEED_JTAG_INTCTRL_SHCPL_IRQ_STAT) { ++ aspeed_jtag_write(aspeed_jtag, ++ status | ASPEED_JTAG_INTCTRL_SHCPL_IRQ_STAT, ++ ASPEED_JTAG_INTCTRL); ++ aspeed_jtag->flag |= status & ASPEED_JTAG_INTCTRL_SHCPL_IRQ_STAT; ++ } ++ ++ if (aspeed_jtag->flag) { ++ wake_up_interruptible(&aspeed_jtag->jtag_wq); ++ ret = IRQ_HANDLED; ++ } else { ++ dev_err(aspeed_jtag->dev, "irq status:%x\n", status); ++ ret = IRQ_NONE; ++ } ++ return ret; ++} ++ ++static int aspeed_jtag_enable(struct jtag *jtag) ++{ ++ struct aspeed_jtag *aspeed_jtag = jtag_priv(jtag); ++ ++ aspeed_jtag->llops->master_enable(aspeed_jtag); ++ return 0; ++} ++ ++static int aspeed_jtag_disable(struct jtag *jtag) ++{ ++ struct aspeed_jtag *aspeed_jtag = jtag_priv(jtag); ++ ++ aspeed_jtag->llops->output_disable(aspeed_jtag); ++ return 0; ++} ++ ++static int aspeed_jtag_init(struct platform_device *pdev, ++ struct aspeed_jtag *aspeed_jtag) ++{ ++ struct resource *res; ++ ++ memset(aspeed_jtag->pad_data_one, ~0, ++ sizeof(aspeed_jtag->pad_data_one)); ++ memset(aspeed_jtag->pad_data_zero, 0, ++ sizeof(aspeed_jtag->pad_data_zero)); ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ aspeed_jtag->reg_base = devm_ioremap_resource(aspeed_jtag->dev, res); ++ if (IS_ERR(aspeed_jtag->reg_base)) ++ return -ENOMEM; ++ ++ aspeed_jtag->pclk = devm_clk_get(aspeed_jtag->dev, NULL); ++ if (IS_ERR(aspeed_jtag->pclk)) { ++ dev_err(aspeed_jtag->dev, "devm_clk_get failed\n"); ++ return PTR_ERR(aspeed_jtag->pclk); ++ } ++ ++ aspeed_jtag->irq = platform_get_irq(pdev, 0); ++ if (aspeed_jtag->irq < 0) ++ dev_warn(aspeed_jtag->dev, ++ "no irq specified, using polling mode"); ++ ++ if (clk_prepare_enable(aspeed_jtag->pclk)) { ++ dev_err(aspeed_jtag->dev, "no irq specified\n"); ++ return -ENOENT; ++ } ++ ++ aspeed_jtag->rst = devm_reset_control_get_shared(&pdev->dev, NULL); ++ if (IS_ERR(aspeed_jtag->rst)) { ++ dev_err(aspeed_jtag->dev, ++ "missing or invalid reset controller device tree entry"); ++ return PTR_ERR(aspeed_jtag->rst); ++ } ++ reset_control_deassert(aspeed_jtag->rst); ++ ++ if (aspeed_jtag->irq >= 0) { ++ aspeed_jtag->irq = ++ devm_request_irq(aspeed_jtag->dev, aspeed_jtag->irq, ++ aspeed_jtag->llops->jtag_interrupt, 0, ++ 
"aspeed-jtag", aspeed_jtag); ++ if (aspeed_jtag->irq) { ++ dev_warn(aspeed_jtag->dev, ++ "unable to request IRQ, using polling mode"); ++ } ++ } ++ ++ aspeed_jtag->llops->output_disable(aspeed_jtag); ++ ++ aspeed_jtag->flag = 0; ++ aspeed_jtag->mode = 0; ++ init_waitqueue_head(&aspeed_jtag->jtag_wq); ++ return 0; ++} ++ ++static int aspeed_jtag_deinit(struct platform_device *pdev, ++ struct aspeed_jtag *aspeed_jtag) ++{ ++ aspeed_jtag_write(aspeed_jtag, 0, ASPEED_JTAG_ISR); ++ /* Disable clock */ ++ aspeed_jtag_write(aspeed_jtag, 0, ASPEED_JTAG_CTRL); ++ reset_control_assert(aspeed_jtag->rst); ++ clk_disable_unprepare(aspeed_jtag->pclk); ++ return 0; ++} ++ ++static int aspeed_jtag_trst_set_hw1(struct aspeed_jtag *aspeed_jtag, u32 active) ++{ ++ aspeed_jtag_write(aspeed_jtag, active ? 0 : ASPEED_JTAG_EC_TRSTn_HIGH, ++ ASPEED_JTAG_EC); ++ return 0; ++} ++ ++static int aspeed_jtag_trst_set_hw2(struct aspeed_jtag *aspeed_jtag, u32 active) ++{ ++ u32 reg_val; ++ ++ reg_val = aspeed_jtag_read(aspeed_jtag, ASPEED_JTAG_GBLCTRL); ++ if (active) ++ reg_val |= ASPEED_JTAG_GBLCTRL_TRST; ++ else ++ reg_val &= ~ASPEED_JTAG_GBLCTRL_TRST; ++ aspeed_jtag_write(aspeed_jtag, reg_val, ASPEED_JTAG_GBLCTRL); ++ return 0; ++} ++ ++static int aspeed_jtag_trst_set(struct jtag *jtag, u32 active) ++{ ++ struct aspeed_jtag *aspeed_jtag = jtag_priv(jtag); ++ ++ return aspeed_jtag->llops->trst_set(aspeed_jtag, active); ++} ++ ++static const struct jtag_ops aspeed_jtag_ops = { ++ .freq_get = aspeed_jtag_freq_get, ++ .freq_set = aspeed_jtag_freq_set, ++ .status_get = aspeed_jtag_status_get, ++ .status_set = aspeed_jtag_status_set, ++ .xfer = aspeed_jtag_xfer, ++ .mode_set = aspeed_jtag_mode_set, ++ .bitbang = aspeed_jtag_bitbang, ++ .enable = aspeed_jtag_enable, ++ .disable = aspeed_jtag_disable ++}; ++ ++static const struct jtag_ops aspeed_jtag_ops_26xx = { ++#ifdef ASPEED_JTAG_HW_MODE_2_ENABLE ++ .freq_get = aspeed_jtag_freq_get_26xx, ++ .freq_set = aspeed_jtag_freq_set_26xx, ++ .status_get = aspeed_jtag_status_get, ++ .status_set = aspeed_jtag_status_set_26xx, ++#else ++ .freq_get = aspeed_jtag_freq_get, ++ .freq_set = aspeed_jtag_freq_set, ++ .status_get = aspeed_jtag_status_get, ++ .status_set = aspeed_jtag_status_set, ++#endif ++ .xfer = aspeed_jtag_xfer, ++ .mode_set = aspeed_jtag_mode_set, ++ .trst_set = aspeed_jtag_trst_set, ++ .bitbang = aspeed_jtag_bitbang, ++ .enable = aspeed_jtag_enable, ++ .disable = aspeed_jtag_disable ++}; ++ ++static const struct jtag_low_level_functions ast25xx_llops = { ++ .master_enable = aspeed_jtag_master, ++ .output_disable = aspeed_jtag_output_disable, ++ .xfer_push_data = aspeed_jtag_xfer_push_data, ++ .xfer_push_data_last = aspeed_jtag_xfer_push_data_last, ++ .xfer_sw = aspeed_jtag_xfer_sw, ++ .xfer_hw = aspeed_jtag_xfer_hw, ++ .xfer_hw_fifo_delay = NULL, ++ .xfer_sw_delay = NULL, ++ .jtag_interrupt = aspeed_jtag_interrupt, ++ .trst_set = aspeed_jtag_trst_set_hw1 ++}; ++ ++static const struct aspeed_jtag_functions ast25xx_functions = { ++ .aspeed_jtag_ops = &aspeed_jtag_ops, ++ .aspeed_jtag_llops = &ast25xx_llops ++}; ++ ++static const struct jtag_low_level_functions ast26xx_llops = { ++#ifdef ASPEED_JTAG_HW_MODE_2_ENABLE ++ .master_enable = aspeed_jtag_master_26xx, ++ .output_disable = aspeed_jtag_output_disable_26xx, ++ .xfer_push_data = aspeed_jtag_xfer_push_data_26xx, ++ .xfer_push_data_last = aspeed_jtag_xfer_push_data_last_26xx, ++ .xfer_sw = aspeed_jtag_xfer_sw, ++ .xfer_hw = aspeed_jtag_xfer_hw2, ++ .xfer_hw_fifo_delay = aspeed_jtag_xfer_hw_fifo_delay_26xx, ++ 
.xfer_sw_delay = aspeed_jtag_sw_delay_26xx, ++ .jtag_interrupt = aspeed_jtag_interrupt_hw2, ++ .trst_set = aspeed_jtag_trst_set_hw2 ++#else ++ .master_enable = aspeed_jtag_master, ++ .output_disable = aspeed_jtag_output_disable, ++ .xfer_push_data = aspeed_jtag_xfer_push_data_26xx, ++ .xfer_push_data_last = aspeed_jtag_xfer_push_data_last_26xx, ++ .xfer_sw = aspeed_jtag_xfer_sw, ++ .xfer_hw = aspeed_jtag_xfer_hw, ++ .xfer_hw_fifo_delay = aspeed_jtag_xfer_hw_fifo_delay_26xx, ++ .xfer_sw_delay = aspeed_jtag_sw_delay_26xx, ++ .jtag_interrupt = aspeed_jtag_interrupt, ++ .trst_set = aspeed_jtag_trst_set_hw1 ++#endif ++}; ++ ++static const struct aspeed_jtag_functions ast26xx_functions = { ++ .aspeed_jtag_ops = &aspeed_jtag_ops_26xx, ++ .aspeed_jtag_llops = &ast26xx_llops ++}; ++ ++static const struct of_device_id aspeed_jtag_of_match[] = { ++ { .compatible = "aspeed,ast2400-jtag", .data = &ast25xx_functions }, ++ { .compatible = "aspeed,ast2500-jtag", .data = &ast25xx_functions }, ++ { .compatible = "aspeed,ast2600-jtag", .data = &ast26xx_functions }, ++ { .compatible = "aspeed,ast2700-jtag", .data = &ast26xx_functions }, ++ {} ++}; ++ ++static int aspeed_jtag_probe(struct platform_device *pdev) ++{ ++ struct aspeed_jtag *aspeed_jtag; ++ struct jtag *jtag; ++ const struct of_device_id *match; ++ const struct aspeed_jtag_functions *jtag_functions; ++ int err; ++ ++ match = of_match_node(aspeed_jtag_of_match, pdev->dev.of_node); ++ if (!match) ++ return -ENODEV; ++ jtag_functions = match->data; ++ ++ jtag = jtag_alloc(&pdev->dev, sizeof(*aspeed_jtag), ++ jtag_functions->aspeed_jtag_ops); ++ if (!jtag) ++ return -ENOMEM; ++ ++ platform_set_drvdata(pdev, jtag); ++ aspeed_jtag = jtag_priv(jtag); ++ aspeed_jtag->dev = &pdev->dev; ++ ++ aspeed_jtag->llops = jtag_functions->aspeed_jtag_llops; ++ ++ /* Initialize device*/ ++ err = aspeed_jtag_init(pdev, aspeed_jtag); ++ if (err) ++ goto err_jtag_init; ++ ++ /* Initialize JTAG core structure*/ ++ err = devm_jtag_register(aspeed_jtag->dev, jtag); ++ if (err) ++ goto err_jtag_register; ++ ++ jtag_functions->aspeed_jtag_ops->freq_set(jtag, 1000000); ++ ++ return 0; ++ ++err_jtag_register: ++ aspeed_jtag_deinit(pdev, aspeed_jtag); ++err_jtag_init: ++ jtag_free(jtag); ++ return err; ++} ++ ++static void aspeed_jtag_remove(struct platform_device *pdev) ++{ ++ struct jtag *jtag = platform_get_drvdata(pdev); ++ ++ aspeed_jtag_deinit(pdev, jtag_priv(jtag)); ++} ++ ++static struct platform_driver aspeed_jtag_driver = { ++ .probe = aspeed_jtag_probe, ++ .remove = aspeed_jtag_remove, ++ .driver = { ++ .name = ASPEED_JTAG_NAME, ++ .of_match_table = aspeed_jtag_of_match, ++ }, ++}; ++module_platform_driver(aspeed_jtag_driver); ++ ++MODULE_AUTHOR("Oleksandr Shamray "); ++MODULE_DESCRIPTION("ASPEED JTAG driver"); ++MODULE_LICENSE("GPL v2"); +diff --git a/drivers/jtag/jtag.c b/drivers/jtag/jtag.c +--- a/drivers/jtag/jtag.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/jtag/jtag.c 2025-12-23 10:16:17.210098268 +0000 +@@ -0,0 +1,387 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++// Copyright (c) 2018 Mellanox Technologies. All rights reserved. 
++// Copyright (c) 2018 Oleksandr Shamray ++// Copyright (c) 2019 Intel Corporation ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++static char *end_status_str[] = { "tlr", "idle", "selDR", "capDR", "sDR", ++ "ex1DR", "pDR", "ex2DR", "updDR", "selIR", ++ "capIR", "sIR", "ex1IR", "pIR", "ex2IR", ++ "updIR", "current" }; ++ ++struct jtag { ++ struct miscdevice miscdev; ++ const struct jtag_ops *ops; ++ int id; ++ unsigned long *priv; ++}; ++ ++static DEFINE_IDA(jtag_ida); ++ ++void *jtag_priv(struct jtag *jtag) ++{ ++ return jtag->priv; ++} ++EXPORT_SYMBOL_GPL(jtag_priv); ++ ++static long jtag_ioctl(struct file *file, unsigned int cmd, unsigned long arg) ++{ ++ struct jtag *jtag = file->private_data; ++ struct jtag_tap_state tapstate; ++ struct jtag_xfer xfer; ++ struct bitbang_packet bitbang; ++ struct tck_bitbang *bitbang_data; ++ struct jtag_mode mode; ++ u8 *xfer_data; ++ u32 data_size; ++ u32 value; ++ u32 active; ++ int err; ++ ++ if (!arg) ++ return -EINVAL; ++ ++ switch (cmd) { ++ case JTAG_GIOCFREQ: ++ if (!jtag->ops->freq_get) ++ return -EOPNOTSUPP; ++ ++ err = jtag->ops->freq_get(jtag, &value); ++ if (err) ++ break; ++ dev_dbg(jtag->miscdev.parent, "JTAG_GIOCFREQ: freq get = %d", ++ value); ++ ++ if (put_user(value, (__u32 __user *)arg)) ++ err = -EFAULT; ++ break; ++ ++ case JTAG_SIOCFREQ: ++ if (!jtag->ops->freq_set) ++ return -EOPNOTSUPP; ++ ++ if (get_user(value, (__u32 __user *)arg)) ++ return -EFAULT; ++ if (value == 0) ++ return -EINVAL; ++ ++ err = jtag->ops->freq_set(jtag, value); ++ dev_dbg(jtag->miscdev.parent, "JTAG_SIOCFREQ: freq set = %d", ++ value); ++ break; ++ ++ case JTAG_SIOCSTATE: ++ if (copy_from_user(&tapstate, (const void __user *)arg, ++ sizeof(struct jtag_tap_state))) ++ return -EFAULT; ++ ++ if (tapstate.from > JTAG_STATE_CURRENT) ++ return -EINVAL; ++ ++ if (tapstate.endstate > JTAG_STATE_CURRENT) ++ return -EINVAL; ++ ++ if (tapstate.reset > JTAG_FORCE_RESET) ++ return -EINVAL; ++ ++ dev_dbg(jtag->miscdev.parent, ++ "JTAG_SIOCSTATE: status set from %s to %s reset %d tck %d", ++ end_status_str[tapstate.from], ++ end_status_str[tapstate.endstate], tapstate.reset, ++ tapstate.tck); ++ ++ err = jtag->ops->status_set(jtag, &tapstate); ++ break; ++ ++ case JTAG_IOCXFER: ++ { ++ u8 ubit_mask = GENMASK(7, 0); ++ u8 remaining_bits = 0x0; ++ union pad_config padding; ++ ++ if (copy_from_user(&xfer, (const void __user *)arg, ++ sizeof(struct jtag_xfer))) ++ return -EFAULT; ++ ++ if (xfer.length >= JTAG_MAX_XFER_DATA_LEN) ++ return -EINVAL; ++ ++ if (xfer.type > JTAG_SDR_XFER) ++ return -EINVAL; ++ ++ if (xfer.direction > JTAG_READ_WRITE_XFER) ++ return -EINVAL; ++ ++ if (xfer.from > JTAG_STATE_CURRENT) ++ return -EINVAL; ++ ++ if (xfer.endstate > JTAG_STATE_CURRENT) ++ return -EINVAL; ++ ++ data_size = DIV_ROUND_UP(xfer.length, BITS_PER_BYTE); ++ xfer_data = memdup_user(u64_to_user_ptr(xfer.tdio), data_size); ++ ++ /* Save unused remaining bits in this transfer */ ++ if ((xfer.length % BITS_PER_BYTE)) { ++ ubit_mask = GENMASK((xfer.length % BITS_PER_BYTE) - 1, ++ 0); ++ remaining_bits = xfer_data[data_size - 1] & ~ubit_mask; ++ } ++ ++ if (IS_ERR(xfer_data)) ++ return -EFAULT; ++ padding.int_value = xfer.padding; ++ dev_dbg(jtag->miscdev.parent, ++ "JTAG_IOCXFER: type: %s direction: %d, END : %s, padding: (value: %d) pre_pad: %d post_pad: %d, len: %d\n", ++ xfer.type ? 
"DR" : "IR", xfer.direction, ++ end_status_str[xfer.endstate], padding.pad_data, ++ padding.pre_pad_number, padding.post_pad_number, ++ xfer.length); ++ ++ print_hex_dump_debug("I:", DUMP_PREFIX_NONE, 16, 1, xfer_data, ++ data_size, false); ++ ++ err = jtag->ops->xfer(jtag, &xfer, xfer_data); ++ if (err) { ++ kfree(xfer_data); ++ return err; ++ } ++ ++ print_hex_dump_debug("O:", DUMP_PREFIX_NONE, 16, 1, xfer_data, ++ data_size, false); ++ ++ /* Restore unused remaining bits in this transfer */ ++ xfer_data[data_size - 1] = (xfer_data[data_size - 1] ++ & ubit_mask) | remaining_bits; ++ ++ err = copy_to_user(u64_to_user_ptr(xfer.tdio), ++ (void *)xfer_data, data_size); ++ kfree(xfer_data); ++ if (err) ++ return -EFAULT; ++ ++ if (copy_to_user((void __user *)arg, (void *)&xfer, ++ sizeof(struct jtag_xfer))) ++ return -EFAULT; ++ break; ++ } ++ ++ case JTAG_GIOCSTATUS: ++ err = jtag->ops->status_get(jtag, &value); ++ if (err) ++ break; ++ dev_dbg(jtag->miscdev.parent, "JTAG_GIOCSTATUS: status get %s", ++ end_status_str[value]); ++ ++ err = put_user(value, (__u32 __user *)arg); ++ break; ++ case JTAG_IOCBITBANG: ++ if (copy_from_user(&bitbang, (const void __user *)arg, ++ sizeof(struct bitbang_packet))) ++ return -EFAULT; ++ ++ if (bitbang.length >= JTAG_MAX_XFER_DATA_LEN) ++ return -EINVAL; ++ ++ data_size = bitbang.length * sizeof(struct tck_bitbang); ++ bitbang_data = memdup_user((void __user *)bitbang.data, ++ data_size); ++ if (IS_ERR(bitbang_data)) ++ return -EFAULT; ++ ++ err = jtag->ops->bitbang(jtag, &bitbang, bitbang_data); ++ if (err) { ++ kfree(bitbang_data); ++ return err; ++ } ++ err = copy_to_user((void __user *)bitbang.data, ++ (void *)bitbang_data, data_size); ++ kfree(bitbang_data); ++ if (err) ++ return -EFAULT; ++ break; ++ case JTAG_SIOCMODE: ++ if (!jtag->ops->mode_set) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&mode, (const void __user *)arg, ++ sizeof(struct jtag_mode))) ++ return -EFAULT; ++ ++ dev_dbg(jtag->miscdev.parent, ++ "JTAG_SIOCMODE: mode set feature %d mode %d", ++ mode.feature, mode.mode); ++ err = jtag->ops->mode_set(jtag, &mode); ++ break; ++ case JTAG_SIOCTRST: ++ if (!jtag->ops->trst_set) ++ return -EOPNOTSUPP; ++ ++ if (get_user(active, (__u32 __user *)arg)) ++ return -EFAULT; ++ ++ dev_dbg(jtag->miscdev.parent, ++ "JTAG_SIOCTRST: active %d", active); ++ ++ err = jtag->ops->trst_set(jtag, active); ++ break; ++ ++ default: ++ return -EINVAL; ++ } ++ return err; ++} ++ ++static int jtag_open(struct inode *inode, struct file *file) ++{ ++ struct jtag *jtag = container_of(file->private_data, ++ struct jtag, ++ miscdev); ++ ++ file->private_data = jtag; ++ if (jtag->ops->enable(jtag)) ++ return -EBUSY; ++ return nonseekable_open(inode, file); ++} ++ ++static int jtag_release(struct inode *inode, struct file *file) ++{ ++ struct jtag *jtag = file->private_data; ++ ++ if (jtag->ops->disable(jtag)) ++ return -EBUSY; ++ return 0; ++} ++ ++static const struct file_operations jtag_fops = { ++ .owner = THIS_MODULE, ++ .open = jtag_open, ++ .llseek = noop_llseek, ++ .unlocked_ioctl = jtag_ioctl, ++ .release = jtag_release, ++}; ++ ++struct jtag *jtag_alloc(struct device *host, size_t priv_size, ++ const struct jtag_ops *ops) ++{ ++ struct jtag *jtag; ++ ++ if (!host) ++ return NULL; ++ ++ if (!ops) ++ return NULL; ++ ++ if (!ops->status_set || !ops->status_get || !ops->xfer) ++ return NULL; ++ ++ jtag = kzalloc(sizeof(*jtag), GFP_KERNEL); ++ if (!jtag) ++ return NULL; ++ jtag->priv = kzalloc(priv_size, GFP_KERNEL); ++ if (!jtag->priv) ++ return NULL; ++ ++ 
jtag->ops = ops; ++ jtag->miscdev.parent = host; ++ ++ return jtag; ++} ++EXPORT_SYMBOL_GPL(jtag_alloc); ++ ++void jtag_free(struct jtag *jtag) ++{ ++ kfree(jtag); ++} ++EXPORT_SYMBOL_GPL(jtag_free); ++ ++static int jtag_register(struct jtag *jtag) ++{ ++ struct device *dev = jtag->miscdev.parent; ++ int err; ++ int id; ++ ++ if (!dev) ++ return -ENODEV; ++ ++ id = ida_simple_get(&jtag_ida, 0, 0, GFP_KERNEL); ++ if (id < 0) ++ return id; ++ ++ jtag->id = id; ++ ++ jtag->miscdev.fops = &jtag_fops; ++ jtag->miscdev.minor = MISC_DYNAMIC_MINOR; ++ jtag->miscdev.name = kasprintf(GFP_KERNEL, "jtag%d", id); ++ if (!jtag->miscdev.name) { ++ err = -ENOMEM; ++ goto err_jtag_alloc; ++ } ++ ++ err = misc_register(&jtag->miscdev); ++ if (err) { ++ dev_err(jtag->miscdev.parent, "Unable to register device\n"); ++ goto err_jtag_name; ++ } ++ return 0; ++ ++err_jtag_name: ++ kfree(jtag->miscdev.name); ++err_jtag_alloc: ++ ida_simple_remove(&jtag_ida, id); ++ return err; ++} ++ ++static void jtag_unregister(struct jtag *jtag) ++{ ++ misc_deregister(&jtag->miscdev); ++ kfree(jtag->miscdev.name); ++ ida_simple_remove(&jtag_ida, jtag->id); ++} ++ ++static void devm_jtag_unregister(struct device *dev, void *res) ++{ ++ jtag_unregister(*(struct jtag **)res); ++} ++ ++int devm_jtag_register(struct device *dev, struct jtag *jtag) ++{ ++ struct jtag **ptr; ++ int ret; ++ ++ ptr = devres_alloc(devm_jtag_unregister, sizeof(struct jtag *), ++ GFP_KERNEL); ++ if (!ptr) ++ return -ENOMEM; ++ ++ ret = jtag_register(jtag); ++ if (!ret) { ++ *ptr = jtag; ++ devres_add(dev, ptr); ++ } else { ++ devres_free(ptr); ++ } ++ return ret; ++} ++EXPORT_SYMBOL_GPL(devm_jtag_register); ++ ++static void __exit jtag_exit(void) ++{ ++ ida_destroy(&jtag_ida); ++} ++ ++module_exit(jtag_exit); ++ ++MODULE_AUTHOR("Oleksandr Shamray "); ++MODULE_DESCRIPTION("Generic jtag support"); ++MODULE_LICENSE("GPL v2"); +diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig +--- a/drivers/mailbox/Kconfig 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/mailbox/Kconfig 2025-12-23 10:16:09.399229222 +0000 +@@ -295,4 +295,12 @@ + acts as an interrupt controller for receiving interrupts from clients. + Say Y here if you want to build this driver. + ++config AST2700_MBOX ++ tristate "ASPEED AST2700 IPC driver" ++ depends on ARCH_ASPEED ++ help ++ Say y here to enable support for the AST2700 IPC mailbox driver, ++ providing an interface for invoking the inter-process communication ++ signals from the application processor to other masters. ++ + endif +diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile +--- a/drivers/mailbox/Makefile 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/mailbox/Makefile 2025-12-23 10:16:13.555159528 +0000 +@@ -64,3 +64,5 @@ + obj-$(CONFIG_QCOM_CPUCP_MBOX) += qcom-cpucp-mbox.o + + obj-$(CONFIG_QCOM_IPCC) += qcom-ipcc.o ++ ++obj-$(CONFIG_AST2700_MBOX) += ast2700-mailbox.o +diff --git a/drivers/mailbox/ast2700-mailbox.c b/drivers/mailbox/ast2700-mailbox.c +--- a/drivers/mailbox/ast2700-mailbox.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/mailbox/ast2700-mailbox.c 2025-12-23 10:16:21.043034027 +0000 +@@ -0,0 +1,235 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Copyright Aspeed Technology Inc. (C) 2025. 
All rights reserved ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* Each bit in the register represents an IPC ID */ ++#define IPCR_TX_TRIG 0x00 ++#define IPCR_ENABLE 0x04 ++#define IPCR_STATUS 0x08 ++#define RX_IRQ(n) BIT(n) ++#define RX_IRQ_MASK 0xf ++#define IPCR_DATA 0x10 ++ ++struct ast2700_mbox_data { ++ u8 num_chans; ++ u8 msg_size; ++}; ++ ++struct ast2700_mbox { ++ struct mbox_controller mbox; ++ u8 msg_size; ++ void __iomem *tx_regs; ++ void __iomem *rx_regs; ++ spinlock_t lock; /* control register lock */ ++}; ++ ++static inline int ch_num(struct mbox_chan *chan) ++{ ++ return chan - chan->mbox->chans; ++} ++ ++static inline bool ast2700_mbox_tx_done(struct ast2700_mbox *mb, int idx) ++{ ++ return !(readl(mb->tx_regs + IPCR_STATUS) & BIT(idx)); ++} ++ ++static irqreturn_t ast2700_mbox_irq(int irq, void *p) ++{ ++ struct ast2700_mbox *mb = p; ++ void __iomem *data_reg; ++ int num_words = mb->msg_size / sizeof(u32); ++ u32 *word_data; ++ u32 status; ++ int n, i; ++ ++ /* Only examine channels that are currently enabled. */ ++ status = readl(mb->rx_regs + IPCR_ENABLE) & ++ readl(mb->rx_regs + IPCR_STATUS); ++ ++ if (!(status & RX_IRQ_MASK)) ++ return IRQ_NONE; ++ ++ for (n = 0; n < mb->mbox.num_chans; ++n) { ++ struct mbox_chan *chan = &mb->mbox.chans[n]; ++ ++ if (!(status & RX_IRQ(n))) ++ continue; ++ ++ data_reg = mb->rx_regs + IPCR_DATA + mb->msg_size * n; ++ word_data = chan->con_priv; ++ /* Read the message data */ ++ for (i = 0; i < num_words; i++) ++ word_data[i] = readl(data_reg + i * sizeof(u32)); ++ ++ mbox_chan_received_data(chan, chan->con_priv); ++ ++ /* The IRQ can be cleared only once the FIFO is empty. */ ++ writel(RX_IRQ(n), mb->rx_regs + IPCR_STATUS); ++ } ++ ++ return IRQ_HANDLED; ++} ++ ++static int ast2700_mbox_send_data(struct mbox_chan *chan, void *data) ++{ ++ struct ast2700_mbox *mb = dev_get_drvdata(chan->mbox->dev); ++ int idx = ch_num(chan); ++ void __iomem *data_reg = mb->tx_regs + IPCR_DATA + mb->msg_size * idx; ++ u32 *word_data = data; ++ int num_words = mb->msg_size / sizeof(u32); ++ int i; ++ ++ if (!(readl(mb->tx_regs + IPCR_ENABLE) & BIT(idx))) { ++ dev_warn(mb->mbox.dev, "%s: Ch-%d not enabled yet\n", __func__, idx); ++ return -ENODEV; ++ } ++ ++ if (!(ast2700_mbox_tx_done(mb, idx))) { ++ dev_warn(mb->mbox.dev, "%s: Ch-%d last data has not finished\n", __func__, idx); ++ return -EBUSY; ++ } ++ ++ /* Write the message data */ ++ for (i = 0 ; i < num_words; i++) ++ writel(word_data[i], data_reg + i * sizeof(u32)); ++ ++ writel(BIT(idx), mb->tx_regs + IPCR_TX_TRIG); ++ dev_dbg(mb->mbox.dev, "%s: Ch-%d sent\n", __func__, idx); ++ ++ return 0; ++} ++ ++static int ast2700_mbox_startup(struct mbox_chan *chan) ++{ ++ struct ast2700_mbox *mb = dev_get_drvdata(chan->mbox->dev); ++ int idx = ch_num(chan); ++ void __iomem *reg = mb->rx_regs + IPCR_ENABLE; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&mb->lock, flags); ++ writel(readl(reg) | BIT(idx), reg); ++ spin_unlock_irqrestore(&mb->lock, flags); ++ ++ return 0; ++} ++ ++static void ast2700_mbox_shutdown(struct mbox_chan *chan) ++{ ++ struct ast2700_mbox *mb = dev_get_drvdata(chan->mbox->dev); ++ int idx = ch_num(chan); ++ void __iomem *reg = mb->rx_regs + IPCR_ENABLE; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&mb->lock, flags); ++ writel(readl(reg) & ~BIT(idx), reg); ++ spin_unlock_irqrestore(&mb->lock, flags); ++} ++ ++static bool ast2700_mbox_last_tx_done(struct mbox_chan *chan) ++{ ++ struct ast2700_mbox *mb = 
dev_get_drvdata(chan->mbox->dev); ++ int idx = ch_num(chan); ++ ++ return ast2700_mbox_tx_done(mb, idx); ++} ++ ++static const struct mbox_chan_ops ast2700_mbox_chan_ops = { ++ .send_data = ast2700_mbox_send_data, ++ .startup = ast2700_mbox_startup, ++ .shutdown = ast2700_mbox_shutdown, ++ .last_tx_done = ast2700_mbox_last_tx_done, ++}; ++ ++static int ast2700_mbox_probe(struct platform_device *pdev) ++{ ++ struct ast2700_mbox *mb; ++ const struct ast2700_mbox_data *dev_data; ++ struct device *dev = &pdev->dev; ++ int irq, ret; ++ ++ if (!pdev->dev.of_node) ++ return -ENODEV; ++ ++ dev_data = device_get_match_data(&pdev->dev); ++ ++ mb = devm_kzalloc(dev, sizeof(*mb), GFP_KERNEL); ++ if (!mb) ++ return -ENOMEM; ++ ++ mb->mbox.chans = devm_kcalloc(&pdev->dev, dev_data->num_chans, ++ sizeof(*mb->mbox.chans), GFP_KERNEL); ++ if (!mb->mbox.chans) ++ return -ENOMEM; ++ ++ /* con_priv of each channel is used to store the message received */ ++ for (int i = 0; i < dev_data->num_chans; i++) { ++ mb->mbox.chans[i].con_priv = devm_kcalloc(dev, dev_data->msg_size, ++ sizeof(u8), GFP_KERNEL); ++ if (!mb->mbox.chans[i].con_priv) ++ return -ENOMEM; ++ } ++ ++ platform_set_drvdata(pdev, mb); ++ ++ mb->tx_regs = devm_platform_ioremap_resource_byname(pdev, "tx"); ++ if (IS_ERR(mb->tx_regs)) ++ return PTR_ERR(mb->tx_regs); ++ ++ mb->rx_regs = devm_platform_ioremap_resource_byname(pdev, "rx"); ++ if (IS_ERR(mb->rx_regs)) ++ return PTR_ERR(mb->rx_regs); ++ ++ mb->msg_size = dev_data->msg_size; ++ mb->mbox.dev = dev; ++ mb->mbox.num_chans = dev_data->num_chans; ++ mb->mbox.ops = &ast2700_mbox_chan_ops; ++ mb->mbox.txdone_irq = false; ++ mb->mbox.txdone_poll = true; ++ mb->mbox.txpoll_period = 5; ++ spin_lock_init(&mb->lock); ++ ++ irq = platform_get_irq(pdev, 0); ++ if (irq < 0) ++ return irq; ++ ++ ret = devm_request_irq(dev, irq, ast2700_mbox_irq, 0, dev_name(dev), mb); ++ if (ret) ++ return ret; ++ ++ return devm_mbox_controller_register(dev, &mb->mbox); ++} ++ ++static const struct ast2700_mbox_data ast2700_dev_data = { ++ .num_chans = 4, ++ .msg_size = 0x20, ++}; ++ ++static const struct of_device_id ast2700_mbox_of_match[] = { ++ { .compatible = "aspeed,ast2700-mailbox", .data = &ast2700_dev_data }, ++ {} ++}; ++MODULE_DEVICE_TABLE(of, ast2700_mbox_of_match); ++ ++static struct platform_driver ast2700_mbox_driver = { ++ .driver = { ++ .name = "ast2700-mailbox", ++ .of_match_table = ast2700_mbox_of_match, ++ }, ++ .probe = ast2700_mbox_probe, ++}; ++module_platform_driver(ast2700_mbox_driver); ++ ++MODULE_AUTHOR("Jammy Huang "); ++MODULE_DESCRIPTION("ASPEED AST2700 IPC driver"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/media/platform/aspeed/aspeed-video.c b/drivers/media/platform/aspeed/aspeed-video.c +--- a/drivers/media/platform/aspeed/aspeed-video.c 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/media/platform/aspeed/aspeed-video.c 2025-12-23 10:16:21.131032552 +0000 +@@ -4,6 +4,7 @@ + + #include + #include ++#include + #include + #include + #include +@@ -25,6 +26,9 @@ + #include + #include + #include ++#include ++#include ++#include + #include + #include + #include +@@ -96,6 +100,7 @@ + #define VE_CTRL_INTERLACE BIT(14) + #define VE_CTRL_HSYNC_POL_CTRL BIT(15) + #define VE_CTRL_FRC GENMASK(23, 16) ++#define AST2600_VE_CTRL_EN_COMPARE_ONLY BIT(31) + + #define VE_TGS_0 0x00c + #define VE_TGS_1 0x010 +@@ -149,6 +154,8 @@ + #define AST2400_VE_COMP_SIZE_READ_BACK 0x078 + #define AST2600_VE_COMP_SIZE_READ_BACK 0x084 + ++#define VE_COMP_FRAME_COUNT_READ_BACK 0x07C ++ + #define VE_SRC_LR_EDGE_DET 
0x090 + #define VE_SRC_LR_EDGE_DET_LEFT GENMASK(11, 0) + #define VE_SRC_LR_EDGE_DET_NO_V BIT(12) +@@ -203,6 +210,51 @@ + #define VE_MEM_RESTRICT_START 0x310 + #define VE_MEM_RESTRICT_END 0x314 + ++/* SCU's registers */ ++#define SCU_MISC_CTRL 0xC0 ++#define SCU_DPLL_SOURCE BIT(20) ++ ++#define SCU_CLK_SEL 0x288 ++#define SCU_SOC_DISPLAY_SEL BIT(15) ++ ++#define SCU_CLK_SEL2 0x304 ++#define SCU_VIDEO_OUTPUT_DELAY GENMASK(5, 0) ++ ++#define SCU_CRT2CLK 0x350 ++#define SCU_CRT2CLK_N GENMASK(31, 16) ++#define SCU_CRT2CLK_R GENMASK(15, 0) ++ ++#define SCU_MULTI_FUNC_12 0x440 ++#define SCU_MULTI_FUNC_CPU_SLI_DIR BIT(5) ++#define SCU_MULTI_FUNC_15 0x454 ++#define SCU_MULTI_FUNC_IO_SLI_DIR BIT(21) ++ ++/* GFX's registers */ ++#define GFX_CTRL 0x60 ++#define GFX_CTRL_ENABLE BIT(0) ++#define GFX_CTRL_FMT GENMASK(9, 7) ++ ++#define GFX_H_DISPLAY 0x70 ++#define GFX_H_DISPLAY_DE GENMASK(28, 16) ++#define GFX_H_DISPLAY_TOTAL GENMASK(12, 0) ++ ++#define GFX_V_DISPLAY 0x78 ++#define GFX_V_DISPLAY_DE GENMASK(27, 16) ++#define GFX_V_DISPLAY_TOTAL GENMASK(11, 0) ++ ++#define GFX_DISPLAY_ADDR 0x80 ++ ++enum { ++ VIDEO_CLK_25MHz = 0, ++ VIDEO_CLK_D1, ++ VIDEO_CLK_D2, ++ VIDEO_CLK_CRT1, ++ VIDEO_CLK_CRT2, ++ VIDEO_CLK_HPLL, ++ VIDEO_CLK_MPLL, ++ VIDEO_CLK_48MHz, ++}; ++ + /* + * VIDEO_MODE_DETECT_DONE: a flag raised if signal lock + * VIDEO_RES_CHANGE: a flag raised if res_change work on-going +@@ -211,6 +263,7 @@ + * VIDEO_FRAME_INPRG: a flag raised if hw working on a frame + * VIDEO_STOPPED: a flag raised if device release + * VIDEO_CLOCKS_ON: a flag raised if clk is on ++ * VIDEO_BOUNDING_BOX: a flag raised if box-finding for partial-jpeg + */ + enum { + VIDEO_MODE_DETECT_DONE, +@@ -220,12 +273,14 @@ + VIDEO_FRAME_INPRG, + VIDEO_STOPPED, + VIDEO_CLOCKS_ON, ++ VIDEO_BOUNDING_BOX, + }; + + enum aspeed_video_format { + VIDEO_FMT_STANDARD = 0, + VIDEO_FMT_ASPEED, +- VIDEO_FMT_MAX = VIDEO_FMT_ASPEED ++ VIDEO_FMT_PARTIAL, ++ VIDEO_FMT_MAX = VIDEO_FMT_PARTIAL + }; + + // for VE_CTRL_CAPTURE_FMT +@@ -243,6 +298,11 @@ + void *virt; + }; + ++struct aspeed_video_box { ++ struct v4l2_rect box; ++ struct list_head link; ++}; ++ + struct aspeed_video_buffer { + struct vb2_v4l2_buffer vb; + struct list_head link; +@@ -262,6 +322,9 @@ + /* + * struct aspeed_video - driver data + * ++ * version: holds the version of aspeed SoC ++ * base: holds the base address of video engine ++ * dvi_base: holds the base address of DVI engine. For 2700 dvi support. + * res_work: holds the delayed_work for res-detection if unlock + * buffers: holds the list of buffer queued from user + * flags: holds the state of video +@@ -270,9 +333,11 @@ + * srcs: holds the buffer information for srcs + * jpeg: holds the buffer information for jpeg header + * bcd: holds the buffer information for bcd work ++ * dbg_src: holds the buffer information for debug input + * yuv420: a flag raised if JPEG subsampling is 420 + * format: holds the video format + * hq_mode: a flag raised if HQ is enabled. 
Only for VIDEO_FMT_ASPEED ++ * input: holds the video input + * frame_rate: holds the frame_rate + * jpeg_quality: holds jpeq's quality (0~11) + * jpeg_hq_quality: holds hq's quality (1~12) only if hq_mode enabled +@@ -281,11 +346,16 @@ + * frame_right: end position of video data in horizontal direction + * frame_top: start position of video data in vertical direction + * perf: holds the statistics primary for debugfs ++ * bounding_box: holds the video rect for partial-jpeg ++ * boxes: holds the list of video-rect info for each partial-jpeg + */ + struct aspeed_video { + void __iomem *base; ++ void __iomem *dvi_base; + struct clk *eclk; + struct clk *vclk; ++ struct clk *crt2clk; ++ struct reset_control *reset; + + struct device *dev; + struct v4l2_ctrl_handler ctrl_handler; +@@ -297,9 +367,15 @@ + struct vb2_queue queue; + struct video_device vdev; + struct mutex video_lock; /* v4l2 and videobuf2 lock */ ++ struct dentry *debugfs_entry; ++ int id; + ++ struct regmap *scu; ++ struct regmap *gfx; ++ u32 version; + u32 jpeg_mode; + u32 comp_size_read; ++ u32 compare_only; + + wait_queue_head_t wait; + spinlock_t lock; /* buffer list lock */ +@@ -307,15 +383,20 @@ + struct list_head buffers; + unsigned long flags; + unsigned int sequence; ++ struct workqueue_struct *rst_wq; ++ struct work_struct rst_work; + + unsigned int max_compressed_size; ++ struct aspeed_video_addr pool; + struct aspeed_video_addr srcs[2]; + struct aspeed_video_addr jpeg; + struct aspeed_video_addr bcd; ++ struct aspeed_video_addr dbg_src; + + bool yuv420; + enum aspeed_video_format format; + bool hq_mode; ++ enum aspeed_video_input input; + unsigned int frame_rate; + unsigned int jpeg_quality; + unsigned int jpeg_hq_quality; +@@ -326,28 +407,45 @@ + unsigned int frame_top; + + struct aspeed_video_perf perf; ++ struct v4l2_rect bounding_box; ++ struct list_head boxes; + }; + + #define to_aspeed_video(x) container_of((x), struct aspeed_video, v4l2_dev) + + struct aspeed_video_config { ++ u32 version; + u32 jpeg_mode; + u32 comp_size_read; ++ u32 compare_only; + }; + + static const struct aspeed_video_config ast2400_config = { ++ .version = 4, + .jpeg_mode = AST2400_VE_SEQ_CTRL_JPEG_MODE, + .comp_size_read = AST2400_VE_COMP_SIZE_READ_BACK, ++ .compare_only = 0, + }; + + static const struct aspeed_video_config ast2500_config = { ++ .version = 5, + .jpeg_mode = AST2500_VE_SEQ_CTRL_JPEG_MODE, + .comp_size_read = AST2400_VE_COMP_SIZE_READ_BACK, ++ .compare_only = 0, + }; + + static const struct aspeed_video_config ast2600_config = { ++ .version = 6, ++ .jpeg_mode = AST2500_VE_SEQ_CTRL_JPEG_MODE, ++ .comp_size_read = AST2600_VE_COMP_SIZE_READ_BACK, ++ .compare_only = AST2600_VE_CTRL_EN_COMPARE_ONLY, ++}; ++ ++static const struct aspeed_video_config ast2700_config = { ++ .version = 7, + .jpeg_mode = AST2500_VE_SEQ_CTRL_JPEG_MODE, + .comp_size_read = AST2600_VE_COMP_SIZE_READ_BACK, ++ .compare_only = AST2600_VE_CTRL_EN_COMPARE_ONLY, + }; + + static const u32 aspeed_video_jpeg_header[ASPEED_VIDEO_JPEG_HEADER_SIZE] = { +@@ -484,9 +582,12 @@ + }; + + static const char * const format_str[] = {"Standard JPEG", +- "Aspeed JPEG"}; ++ "Aspeed JPEG", "Partial JPEG"}; ++static const char * const input_str[] = {"HOST VGA", "BMC GFX", "MEMORY", "DVI"}; + + static unsigned int debug; ++static unsigned int dual_flag; ++DECLARE_WAIT_QUEUE_HEAD(waitq); + + static bool aspeed_video_alloc_buf(struct aspeed_video *video, + struct aspeed_video_addr *addr, +@@ -495,6 +596,22 @@ + static void aspeed_video_free_buf(struct aspeed_video *video, + struct 
aspeed_video_addr *addr); + ++/** ++ * _make_addr - make address fit for ast2700 ++ * @addr: dma address for hardware to work ++ * ++ * Return: 32bit format of address ++ */ ++static inline u32 _make_addr(dma_addr_t addr) ++{ ++#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT ++ // In ast2700, it store higt byte[35:32] in low byte[3:0] ++ return (addr >> 32) | (u32)(addr); ++#else ++ return addr; ++#endif ++} ++ + static void aspeed_video_init_jpeg_table(u32 *table, bool yuv420) + { + int i; +@@ -576,13 +693,58 @@ + p->duration); + } + ++static void aspeed_video_partial_jpeg_update_regs(struct aspeed_video *v) ++{ ++ if (test_bit(VIDEO_BOUNDING_BOX, &v->flags)) { ++ aspeed_video_update(v, VE_SEQ_CTRL, ++ v->jpeg_mode, ++ VE_SEQ_CTRL_AUTO_COMP); ++ aspeed_video_update(v, VE_BCD_CTRL, 0, ++ VE_BCD_CTRL_EN_BCD); ++ aspeed_video_write(v, VE_COMP_WINDOW, ++ v->pix_fmt.width << 16 | ++ v->pix_fmt.height); ++ v4l2_dbg(1, debug, &v->v4l2_dev, ++ "%s: BCD enabled\n", __func__); ++ } else { ++ u32 scan_lines = aspeed_video_read(v, VE_SRC_SCANLINE_OFFSET); ++ u32 frame_count = aspeed_video_read(v, VE_COMP_FRAME_COUNT_READ_BACK); ++ u32 old_src_addr, new_src_addr; ++ dma_addr_t addr; ++ u32 offset; ++ ++ if (v->version >= 7) { ++ old_src_addr = (frame_count & 0x01) ? VE_SRC0_ADDR : VE_SRC1_ADDR; ++ new_src_addr = (frame_count & 0x01) ? VE_SRC1_ADDR : VE_SRC0_ADDR; ++ } else { ++ old_src_addr = VE_SRC0_ADDR; ++ new_src_addr = VE_SRC0_ADDR; ++ } ++ addr = aspeed_video_read(v, old_src_addr); ++ ++ aspeed_video_update(v, VE_SEQ_CTRL, ++ VE_SEQ_CTRL_AUTO_COMP, ++ v->jpeg_mode); ++ aspeed_video_update(v, VE_BCD_CTRL, ++ VE_BCD_CTRL_EN_BCD, 0); ++ aspeed_video_write(v, VE_COMP_WINDOW, ++ v->bounding_box.width << 16 | ++ v->bounding_box.height); ++ ++ offset = (scan_lines * v->bounding_box.top) + ++ ((256 * v->bounding_box.left) >> (v->yuv420 ? 
4 : 3)); ++ aspeed_video_write(v, new_src_addr, addr + offset); ++ v4l2_dbg(1, debug, &v->v4l2_dev, ++ "%s: BCD disabled, frame#(%d) offset(0x%x)\n", __func__, frame_count, offset); ++ } ++} ++ + static int aspeed_video_start_frame(struct aspeed_video *video) + { + dma_addr_t addr; + unsigned long flags; + struct aspeed_video_buffer *buf; + u32 seq_ctrl = aspeed_video_read(video, VE_SEQ_CTRL); +- bool bcd_buf_need = (video->format != VIDEO_FMT_STANDARD); + + if (video->v4l2_input_status) { + v4l2_dbg(1, debug, &video->v4l2_dev, "No signal; don't start frame\n"); +@@ -595,18 +757,14 @@ + return -EBUSY; + } + +- if (bcd_buf_need && !video->bcd.size) { +- if (!aspeed_video_alloc_buf(video, &video->bcd, +- VE_BCD_BUFF_SIZE)) { +- dev_err(video->dev, "Failed to allocate BCD buffer\n"); +- dev_err(video->dev, "don't start frame\n"); +- return -ENOMEM; +- } +- aspeed_video_write(video, VE_BCD_ADDR, video->bcd.dma); +- v4l2_dbg(1, debug, &video->v4l2_dev, "bcd addr(%pad) size(%d)\n", +- &video->bcd.dma, video->bcd.size); +- } else if (!bcd_buf_need && video->bcd.size) { +- aspeed_video_free_buf(video, &video->bcd); ++ if (video->input == VIDEO_INPUT_GFX) { ++ u32 val; ++ ++ // update input buffer address as gfx's ++ regmap_read(video->gfx, GFX_DISPLAY_ADDR, &val); ++ aspeed_video_write(video, VE_TGS_0, val); ++ } else if (video->input == VIDEO_INPUT_MEM) { ++ aspeed_video_write(video, VE_TGS_0, _make_addr(video->dbg_src.dma)); + } + + spin_lock_irqsave(&video->lock, flags); +@@ -624,15 +782,26 @@ + + aspeed_video_write(video, VE_COMP_PROC_OFFSET, 0); + aspeed_video_write(video, VE_COMP_OFFSET, 0); +- aspeed_video_write(video, VE_COMP_ADDR, addr); ++ aspeed_video_write(video, VE_COMP_ADDR, _make_addr(addr)); + + aspeed_video_update(video, VE_INTERRUPT_CTRL, 0, + VE_INTERRUPT_COMP_COMPLETE); + +- video->perf.last_sample = ktime_get(); +- +- aspeed_video_update(video, VE_SEQ_CTRL, 0, +- VE_SEQ_CTRL_TRIG_CAPTURE | VE_SEQ_CTRL_TRIG_COMP); ++ if (video->format == VIDEO_FMT_PARTIAL) { ++ aspeed_video_partial_jpeg_update_regs(video); ++ if (test_bit(VIDEO_BOUNDING_BOX, &video->flags)) { ++ video->perf.last_sample = ktime_get(); ++ seq_ctrl = VE_SEQ_CTRL_TRIG_CAPTURE | VE_SEQ_CTRL_TRIG_COMP; ++ aspeed_video_update(video, VE_CTRL, video->compare_only, video->compare_only); ++ } else { ++ seq_ctrl = VE_SEQ_CTRL_TRIG_COMP; ++ aspeed_video_update(video, VE_CTRL, video->compare_only, 0); ++ } ++ } else { ++ video->perf.last_sample = ktime_get(); ++ seq_ctrl = VE_SEQ_CTRL_TRIG_CAPTURE | VE_SEQ_CTRL_TRIG_COMP; ++ } ++ aspeed_video_update(video, VE_SEQ_CTRL, 0, seq_ctrl); + + return 0; + } +@@ -660,6 +829,9 @@ + aspeed_video_write(video, VE_INTERRUPT_CTRL, 0); + aspeed_video_write(video, VE_INTERRUPT_STATUS, 0xffffffff); + ++ reset_control_assert(video->reset); ++ usleep_range(100, 200); ++ + /* Turn off the relevant clocks */ + clk_disable(video->eclk); + clk_disable(video->vclk); +@@ -676,7 +848,68 @@ + clk_enable(video->vclk); + clk_enable(video->eclk); + ++ mdelay(10); ++ reset_control_deassert(video->reset); ++ + set_bit(VIDEO_CLOCKS_ON, &video->flags); ++ ++ if (video->version >= 7) ++ queue_work(video->rst_wq, &video->rst_work); ++} ++ ++static void aspeed_video_reset(struct aspeed_video *v) ++{ ++ int rc; ++ u32 val; ++ ++ reset_control_assert(v->reset); ++ rc = reset_control_status(v->reset); ++ if (rc == 0) { ++ /* 2700 has 2 VE, but only 1 reset. To have reset work, we need ++ * to notify the other VE if reset is not asserted. 
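++		 * The peer VE's bit in dual_flag is set and its reset worker is woken;
++		 * this thread then waits until that worker runs and clears the flag.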
++ */ ++ val = 1 << (v->id ^ 1); ++ dual_flag |= val; ++ v4l2_dbg(2, debug, &v->v4l2_dev, "%s: reset not asserted, needs another VE(%x)\n", __func__, val); ++ wake_up_all(&waitq); ++ rc = wait_event_interruptible(waitq, (dual_flag & val) != val); ++ if (rc) ++ v4l2_dbg(2, debug, &v->v4l2_dev, "%s: another VE done, dual_flag(%d)\n", __func__, dual_flag); ++ } ++ ++ usleep_range(100, 200); ++ reset_control_deassert(v->reset); ++ udelay(1); ++} ++ ++/* ++ * aspeed_video_rst_worker: This is a work to wait event from the other VE to ++ * do full function reset because 2700's 2 VE share 1 reset line. When there ++ * is one VE wants reset, both VE needs to do it. ++ * ++ */ ++static void aspeed_video_rst_worker(struct work_struct *work) ++{ ++ struct aspeed_video *v = ++ container_of(work, struct aspeed_video, rst_work); ++ int rc; ++ ++ rc = wait_event_timeout(waitq, ++ (dual_flag & (1 << v->id)), ++ INVALID_RESOLUTION_DELAY); ++ if (rc) { ++ v4l2_dbg(2, debug, &v->v4l2_dev, "%s: dual_flag(%x)\n", __func__, dual_flag); ++ dual_flag = 0; ++ set_bit(VIDEO_RES_CHANGE, &v->flags); ++ clear_bit(VIDEO_FRAME_INPRG, &v->flags); ++ schedule_delayed_work(&v->res_work, 0); ++ wait_event_interruptible(v->wait, ++ !test_bit(VIDEO_RES_CHANGE, &v->flags)); ++ v4l2_dbg(2, debug, &v->v4l2_dev, "%s: rst and clear, %d\n", __func__, rc); ++ } ++ ++ if (test_bit(VIDEO_CLOCKS_ON, &v->flags)) ++ queue_work(v->rst_wq, &v->rst_work); + } + + static void aspeed_video_bufs_done(struct aspeed_video *video, +@@ -684,11 +917,18 @@ + { + unsigned long flags; + struct aspeed_video_buffer *buf; ++ struct aspeed_video_box *box, *tmp; + + spin_lock_irqsave(&video->lock, flags); + list_for_each_entry(buf, &video->buffers, link) + vb2_buffer_done(&buf->vb.vb2_buf, state); + INIT_LIST_HEAD(&video->buffers); ++ ++ list_for_each_entry_safe(box, tmp, &video->boxes, link) { ++ list_del(&box->link); ++ kfree(box); ++ } ++ INIT_LIST_HEAD(&video->boxes); + spin_unlock_irqrestore(&video->lock, flags); + } + +@@ -701,12 +941,96 @@ + + video->v4l2_input_status = V4L2_IN_ST_NO_SIGNAL; + +- aspeed_video_off(video); ++ aspeed_video_write(video, VE_INTERRUPT_CTRL, 0); ++ aspeed_video_write(video, VE_INTERRUPT_STATUS, 0xffffffff); + aspeed_video_bufs_done(video, VB2_BUF_STATE_ERROR); + + schedule_delayed_work(&video->res_work, delay); + } + ++static inline bool _box_data_changed(struct aspeed_video *v, u8 data) ++{ ++ if (v->version >= 6) ++ return ((data & 0xf) != 0xf); ++ ++ return ((data & 0xf) == 0xf); ++} ++ ++static void aspeed_video_get_bounding_box(struct aspeed_video *v, ++ struct v4l2_rect *box) ++{ ++ u16 min_x, min_y, max_x, max_y; ++ u16 w, h, i, j; ++ u32 bytesperline; ++ u8 mb_shift = v->yuv420 ? 
4 : 3; ++ u8 *bcd_buf = v->bcd.virt; ++ ++ if (!bcd_buf) { ++ box->left = 0; ++ box->top = 0; ++ box->width = v->pix_fmt.width; ++ box->height = v->pix_fmt.width; ++ v4l2_dbg(1, debug, &v->v4l2_dev, "%s: bcd buf not ready yet\n", __func__); ++ return; ++ } ++ ++ w = v->pix_fmt.width >> mb_shift; ++ h = v->pix_fmt.height >> mb_shift; ++ v4l2_dbg(1, debug, &v->v4l2_dev, "%s: macrobox_shift(%d) size (%d * %d)\n", ++ __func__, mb_shift, w, h); ++ ++ min_x = 0x3ff; ++ min_y = 0x3ff; ++ max_x = 0; ++ max_y = 0; ++ ++ for (j = 0; j < h; j++) { ++ bytesperline = w * j; ++ for (i = 0; i < w; i++) { ++ if (_box_data_changed(v, *(bcd_buf + bytesperline + i))) { ++ min_x = min(i, min_x); ++ max_x = max(i, max_x); ++ min_y = min(j, min_y); ++ max_y = max(j, max_y); ++ ++ // skip line if max_x can't be bigger ++ if (max_x == w) ++ i = w; ++ // skip the pixels between min_x ~ max_x ++ if (max_x > min_x && i > min_x && i < max_x) ++ i = max_x; ++ } ++ } ++ } ++ v4l2_dbg(1, debug, &v->v4l2_dev, ++ "%s: left %d right %d top %d bottom %d\n", __func__, ++ min_x, max_x, min_y, max_y); ++ ++ // clear bcd flag ++ if (v->version < 6) ++ memset(bcd_buf, 0x01, (w * h)); ++ ++ // use full size every 8 frames ++ if (IS_ALIGNED(v->sequence, 8)) { ++ min_x = 0; ++ max_x = w - 1; ++ min_y = 0; ++ max_y = h - 1; ++ } else if (min_x > max_x || min_y > max_y || max_x > w || max_y > h) { ++ memset(box, 0, sizeof(*box)); ++ v4l2_dbg(1, debug, &v->v4l2_dev, "box not found\n"); ++ return; ++ } ++ ++ box->left = min_x << mb_shift; ++ box->top = min_y << mb_shift; ++ box->width = (max_x + 1 - min_x) << mb_shift; ++ box->height = (max_y + 1 - min_y) << mb_shift; ++ v4l2_dbg(1, debug, &v->v4l2_dev, ++ "%s: x: %d, y: %d, w: %d , h: %d\n", __func__, ++ box->left, box->top, box->width, box->height); ++} ++ + static void aspeed_video_swap_src_buf(struct aspeed_video *v) + { + if (v->format == VIDEO_FMT_STANDARD) +@@ -716,24 +1040,100 @@ + if (IS_ALIGNED(v->sequence, 8)) + memset((u8 *)v->bcd.virt, 0x00, VE_BCD_BUFF_SIZE); + ++ // 2700's new design will automatically swap src at each operation ++ if (v->version > 6 && v->format == VIDEO_FMT_ASPEED) ++ return; ++ + if (v->sequence & 0x01) { +- aspeed_video_write(v, VE_SRC0_ADDR, v->srcs[1].dma); +- aspeed_video_write(v, VE_SRC1_ADDR, v->srcs[0].dma); ++ aspeed_video_write(v, VE_SRC0_ADDR, _make_addr(v->srcs[1].dma)); ++ aspeed_video_write(v, VE_SRC1_ADDR, _make_addr(v->srcs[0].dma)); + } else { +- aspeed_video_write(v, VE_SRC0_ADDR, v->srcs[0].dma); +- aspeed_video_write(v, VE_SRC1_ADDR, v->srcs[1].dma); ++ aspeed_video_write(v, VE_SRC0_ADDR, _make_addr(v->srcs[0].dma)); ++ aspeed_video_write(v, VE_SRC1_ADDR, _make_addr(v->srcs[1].dma)); + } + } + ++static void aspeed_video_frame_done_handler(struct aspeed_video *video, ++ bool buf_done) ++{ ++ struct aspeed_video_buffer *buf; ++ bool empty = true; ++ u32 frame_size; ++ ++ if (!buf_done) ++ return; ++ ++ spin_lock(&video->lock); ++ clear_bit(VIDEO_FRAME_INPRG, &video->flags); ++ buf = list_first_entry_or_null(&video->buffers, ++ struct aspeed_video_buffer, ++ link); ++ if (buf) { ++ frame_size = aspeed_video_read(video, ++ video->comp_size_read); ++ vb2_set_plane_payload(&buf->vb.vb2_buf, 0, frame_size); ++ ++ /* ++ * VIDEO_FMT_ASPEED requires continuous update. ++ * On the contrary, standard jpeg can keep last buffer ++ * to always have the latest result. 
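++			 * For VIDEO_FMT_PARTIAL, the bounding box of the completed frame is
++			 * also queued on video->boxes for retrieval through g_selection.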
++ */ ++ if (video->format != VIDEO_FMT_ASPEED && ++ list_is_last(&buf->link, &video->buffers)) { ++ empty = false; ++ v4l2_dbg(1, debug, &video->v4l2_dev, "skip to keep last frame updated\n"); ++ } else { ++ buf->vb.vb2_buf.timestamp = ktime_get_ns(); ++ buf->vb.sequence = video->sequence++; ++ buf->vb.field = V4L2_FIELD_NONE; ++ vb2_buffer_done(&buf->vb.vb2_buf, ++ VB2_BUF_STATE_DONE); ++ list_del(&buf->link); ++ empty = list_empty(&video->buffers); ++ if (video->format == VIDEO_FMT_PARTIAL) { ++ struct aspeed_video_box *box = ++ kmalloc(sizeof(struct aspeed_video_box), ++ GFP_KERNEL); ++ ++ box->box = video->bounding_box; ++ list_add_tail(&box->link, &video->boxes); ++ } ++ } ++ } ++ spin_unlock(&video->lock); ++ ++ aspeed_video_swap_src_buf(video); ++ ++ if (test_bit(VIDEO_STREAMING, &video->flags) && !empty && ++ video->input != VIDEO_INPUT_MEM) { ++ set_bit(VIDEO_BOUNDING_BOX, &video->flags); ++ aspeed_video_start_frame(video); ++ } ++} ++ ++static irqreturn_t aspeed_video_thread_irq(int irq, void *arg) ++{ ++ struct aspeed_video *v = arg; ++ ++ aspeed_video_get_bounding_box(v, &v->bounding_box); ++ ++ if (v->bounding_box.width && v->bounding_box.height) ++ clear_bit(VIDEO_BOUNDING_BOX, &v->flags); ++ else ++ set_bit(VIDEO_BOUNDING_BOX, &v->flags); ++ ++ aspeed_video_start_frame(v); ++ ++ return IRQ_HANDLED; ++} ++ + static irqreturn_t aspeed_video_irq(int irq, void *arg) + { + struct aspeed_video *video = arg; + u32 sts = aspeed_video_read(video, VE_INTERRUPT_STATUS); ++ bool get_box = false; + +- /* +- * Hardware sometimes asserts interrupts that we haven't actually +- * enabled; ignore them if so. +- */ ++ aspeed_video_write(video, VE_INTERRUPT_STATUS, sts); + sts &= aspeed_video_read(video, VE_INTERRUPT_CTRL); + + v4l2_dbg(2, debug, &video->v4l2_dev, "irq sts=%#x %s%s%s%s\n", sts, +@@ -755,8 +1155,6 @@ + if (test_bit(VIDEO_RES_DETECT, &video->flags)) { + aspeed_video_update(video, VE_INTERRUPT_CTRL, + VE_INTERRUPT_MODE_DETECT, 0); +- aspeed_video_write(video, VE_INTERRUPT_STATUS, +- VE_INTERRUPT_MODE_DETECT); + sts &= ~VE_INTERRUPT_MODE_DETECT; + set_bit(VIDEO_MODE_DETECT_DONE, &video->flags); + wake_up_interruptible_all(&video->wait); +@@ -772,41 +1170,13 @@ + } + + if (sts & VE_INTERRUPT_COMP_COMPLETE) { +- struct aspeed_video_buffer *buf; +- bool empty = true; +- u32 frame_size = aspeed_video_read(video, +- video->comp_size_read); +- +- update_perf(&video->perf); +- +- spin_lock(&video->lock); +- clear_bit(VIDEO_FRAME_INPRG, &video->flags); +- buf = list_first_entry_or_null(&video->buffers, +- struct aspeed_video_buffer, +- link); +- if (buf) { +- vb2_set_plane_payload(&buf->vb.vb2_buf, 0, frame_size); ++ bool frame_done = false; + +- /* +- * aspeed_jpeg requires continuous update. +- * On the contrary, standard jpeg can keep last buffer +- * to always have the latest result. 
+- */ +- if (video->format == VIDEO_FMT_STANDARD && +- list_is_last(&buf->link, &video->buffers)) { +- empty = false; +- v4l2_dbg(1, debug, &video->v4l2_dev, "skip to keep last frame updated\n"); +- } else { +- buf->vb.vb2_buf.timestamp = ktime_get_ns(); +- buf->vb.sequence = video->sequence++; +- buf->vb.field = V4L2_FIELD_NONE; +- vb2_buffer_done(&buf->vb.vb2_buf, +- VB2_BUF_STATE_DONE); +- list_del(&buf->link); +- empty = list_empty(&video->buffers); +- } +- } +- spin_unlock(&video->lock); ++ if (video->format != VIDEO_FMT_PARTIAL) ++ frame_done = true; ++ else if (!test_bit(VIDEO_BOUNDING_BOX, &video->flags) && ++ video->bounding_box.width && video->bounding_box.height) ++ frame_done = true; + + aspeed_video_update(video, VE_SEQ_CTRL, + VE_SEQ_CTRL_TRIG_CAPTURE | +@@ -814,17 +1184,60 @@ + VE_SEQ_CTRL_TRIG_COMP, 0); + aspeed_video_update(video, VE_INTERRUPT_CTRL, + VE_INTERRUPT_COMP_COMPLETE, 0); +- aspeed_video_write(video, VE_INTERRUPT_STATUS, +- VE_INTERRUPT_COMP_COMPLETE); + sts &= ~VE_INTERRUPT_COMP_COMPLETE; + +- aspeed_video_swap_src_buf(video); ++ if (frame_done) { ++ update_perf(&video->perf); ++ aspeed_video_frame_done_handler(video, frame_done); ++ } else { ++ get_box = true; ++ } ++ } + +- if (test_bit(VIDEO_STREAMING, &video->flags) && !empty) +- aspeed_video_start_frame(video); ++ return get_box ? IRQ_WAKE_THREAD : IRQ_HANDLED; ++} ++ ++static irqreturn_t aspeed_video_md_irq(int irq, void *arg) ++{ ++ struct aspeed_video *video = arg; ++ u32 sts; ++ ++ sts = readl(video->dvi_base + VE_INTERRUPT_STATUS); ++ writel(sts, video->dvi_base + VE_INTERRUPT_STATUS); ++ sts &= readl(video->dvi_base + VE_INTERRUPT_CTRL); ++ ++ v4l2_dbg(2, debug, &video->v4l2_dev, "dvi irq sts=%#x %s%s\n", sts, ++ sts & VE_INTERRUPT_MODE_DETECT_WD ? ", unlock" : "", ++ sts & VE_INTERRUPT_MODE_DETECT ? ", lock" : ""); ++ ++ if (sts & VE_INTERRUPT_MODE_DETECT_WD) { ++ writel(0, video->dvi_base + VE_INTERRUPT_CTRL); ++ writel(0xffffffff, video->dvi_base + VE_INTERRUPT_STATUS); ++ aspeed_video_irq_res_change(video, 0); ++ return IRQ_HANDLED; + } + +- return sts ? IRQ_NONE : IRQ_HANDLED; ++ if (sts & VE_INTERRUPT_MODE_DETECT) { ++ if (test_bit(VIDEO_RES_DETECT, &video->flags)) { ++ aspeed_video_update(video, VE_INTERRUPT_CTRL, ++ VE_INTERRUPT_MODE_DETECT, 0); ++ sts &= ~VE_INTERRUPT_MODE_DETECT; ++ set_bit(VIDEO_MODE_DETECT_DONE, &video->flags); ++ wake_up_interruptible_all(&video->wait); ++ } else { ++ /* ++ * Signal acquired while NOT doing resolution ++ * detection; reset the engine and re-initialize ++ */ ++ writel(0, video->dvi_base + VE_INTERRUPT_CTRL); ++ writel(0xffffffff, video->dvi_base + VE_INTERRUPT_STATUS); ++ aspeed_video_irq_res_change(video, ++ RESOLUTION_CHANGE_DELAY); ++ return IRQ_HANDLED; ++ } ++ } ++ ++ return IRQ_HANDLED; + } + + static void aspeed_video_check_and_set_polarity(struct aspeed_video *video) +@@ -896,7 +1309,7 @@ + + /* + * Get the minimum HW-supported compression buffer size for the frame size. +- * Assume worst-case JPEG compression size is 1/8 raw size. This should be ++ * Assume worst-case JPEG compression size is 1/4 raw size. This should be + * plenty even for maximum quality; any worse and the engine will simply return + * incomplete JPEGs. 
+ */ +@@ -908,7 +1321,7 @@ + unsigned int size; + const unsigned int num_compression_packets = 4; + const unsigned int compression_packet_size = 1024; +- const unsigned int max_compressed_size = frame_size / 2; /* 4bpp / 8 */ ++ const unsigned int max_compressed_size = frame_size; /* 4bpp / 4 */ + + video->max_compressed_size = UINT_MAX; + +@@ -929,6 +1342,7 @@ + aspeed_video_write(video, VE_STREAM_BUF_SIZE, + compression_buffer_size_reg); + ++ video->max_compressed_size = round_up(max_compressed_size, 0x10000); + v4l2_dbg(1, debug, &video->v4l2_dev, "Max compressed size: %#x\n", + video->max_compressed_size); + } +@@ -1026,9 +1440,23 @@ + } + } + ++static void aspeed_video_get_resolution_gfx(struct aspeed_video *video, ++ struct v4l2_bt_timings *det) ++{ ++ u32 h_val, v_val; ++ ++ regmap_read(video->gfx, GFX_H_DISPLAY, &h_val); ++ regmap_read(video->gfx, GFX_V_DISPLAY, &v_val); ++ ++ det->width = FIELD_GET(GFX_H_DISPLAY_DE, h_val) + 1; ++ det->height = FIELD_GET(GFX_V_DISPLAY_DE, v_val) + 1; ++ video->v4l2_input_status = 0; ++} ++ + #define res_check(v) test_and_clear_bit(VIDEO_MODE_DETECT_DONE, &(v)->flags) + +-static void aspeed_video_get_resolution(struct aspeed_video *video) ++static void aspeed_video_get_resolution_vga(struct aspeed_video *video, ++ struct v4l2_bt_timings *det) + { + bool invalid_resolution = true; + int rc; +@@ -1036,7 +1464,6 @@ + u32 mds; + u32 src_lr_edge; + u32 src_tb_edge; +- struct v4l2_bt_timings *det = &video->detected_timings; + + det->width = MIN_WIDTH; + det->height = MIN_HEIGHT; +@@ -1107,20 +1534,51 @@ + return; + } + ++ if (video->input == VIDEO_INPUT_DVI && video->version == 6) ++ video->frame_right -= 1; ++ + det->height = (video->frame_bottom - video->frame_top) + 1; + det->width = (video->frame_right - video->frame_left) + 1; + video->v4l2_input_status = 0; + + aspeed_video_get_timings(video, det); + +- /* +- * Enable mode-detect watchdog, resolution-change watchdog and +- * automatic compression after frame capture. +- */ ++ /* Enable mode-detect watchdog, resolution-change watchdog */ + aspeed_video_update(video, VE_INTERRUPT_CTRL, 0, + VE_INTERRUPT_MODE_DETECT_WD); +- aspeed_video_update(video, VE_SEQ_CTRL, 0, +- VE_SEQ_CTRL_AUTO_COMP | VE_SEQ_CTRL_EN_WATCHDOG); ++ aspeed_video_update(video, VE_SEQ_CTRL, 0, VE_SEQ_CTRL_EN_WATCHDOG); ++} ++ ++/* ++ * For ast2700 only. Due to hw design, the timing detection of DVI is ++ * in io-die. Thus, we need to use another hw to do this job. 
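++ * The VGA detection path is reused by temporarily pointing video->base
++ * at the io-die registers mapped at dvi_base.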
++ */ ++static void aspeed_video_get_resolution_dvi(struct aspeed_video *video, ++ struct v4l2_bt_timings *det) ++{ ++ void *base = video->base; ++ ++ video->base = video->dvi_base; ++ aspeed_video_get_resolution_vga(video, det); ++ video->base = base; ++} ++ ++static void aspeed_video_get_resolution(struct aspeed_video *video) ++{ ++ struct v4l2_bt_timings *det = &video->detected_timings; ++ ++ // if input is MEM, leave resolution decided by user through set_dv_timings ++ if (video->input == VIDEO_INPUT_MEM) { ++ video->v4l2_input_status = 0; ++ return; ++ } ++ ++ if (video->input == VIDEO_INPUT_GFX) ++ aspeed_video_get_resolution_gfx(video, det); ++ else if (video->input == VIDEO_INPUT_DVI && video->version == 7) ++ aspeed_video_get_resolution_dvi(video, det); ++ else ++ aspeed_video_get_resolution_vga(video, det); + + v4l2_dbg(1, debug, &video->v4l2_dev, "Got resolution: %dx%d\n", + det->width, det->height); +@@ -1130,6 +1588,7 @@ + { + struct v4l2_bt_timings *act = &video->active_timings; + unsigned int size = act->width * ALIGN(act->height, 8); ++ bool is_sync_mode_ok = (video->version != 7); + + /* Set capture/compression frame sizes */ + aspeed_video_calc_compressed_size(video, size); +@@ -1156,7 +1615,8 @@ + aspeed_video_write(video, VE_SRC_SCANLINE_OFFSET, act->width * 4); + + /* Don't use direct mode below 1024 x 768 (irqs don't fire) */ +- if (size < DIRECT_FETCH_THRESHOLD) { ++ if (video->input == VIDEO_INPUT_VGA && size < DIRECT_FETCH_THRESHOLD && ++ is_sync_mode_ok) { + v4l2_dbg(1, debug, &video->v4l2_dev, "Capture: Sync Mode\n"); + aspeed_video_write(video, VE_TGS_0, + FIELD_PREP(VE_TGS_FIRST, +@@ -1170,41 +1630,50 @@ + aspeed_video_update(video, VE_CTRL, + VE_CTRL_INT_DE | VE_CTRL_DIRECT_FETCH, + VE_CTRL_INT_DE); ++ } else if (video->input == VIDEO_INPUT_DVI) { ++ v4l2_dbg(1, debug, &video->v4l2_dev, "Capture: Sync Mode for external source\n"); ++ aspeed_video_update(video, VE_CTRL, ++ VE_CTRL_INT_DE | VE_CTRL_DIRECT_FETCH, ++ 0); + } else { ++ u32 ctrl, val, bpp; ++ + v4l2_dbg(1, debug, &video->v4l2_dev, "Capture: Direct Mode\n"); ++ ctrl = VE_CTRL_DIRECT_FETCH; ++ if (video->input == VIDEO_INPUT_GFX) { ++ regmap_read(video->gfx, GFX_CTRL, &val); ++ bpp = FIELD_GET(GFX_CTRL_FMT, val) ? 
32 : 16; ++ if (bpp == 16) ++ ctrl |= VE_CTRL_INT_DE; ++ aspeed_video_write(video, VE_TGS_1, act->width * (bpp >> 3)); ++ } else { ++ // stride should be the same with capture window width ++ val = aspeed_video_read(video, VE_CAP_WINDOW) >> 16; ++ aspeed_video_write(video, VE_TGS_1, val * 4); ++ } + aspeed_video_update(video, VE_CTRL, + VE_CTRL_INT_DE | VE_CTRL_DIRECT_FETCH, +- VE_CTRL_DIRECT_FETCH); +- } +- +- size *= 4; +- +- if (size != video->srcs[0].size) { +- if (video->srcs[0].size) +- aspeed_video_free_buf(video, &video->srcs[0]); +- if (video->srcs[1].size) +- aspeed_video_free_buf(video, &video->srcs[1]); +- +- if (!aspeed_video_alloc_buf(video, &video->srcs[0], size)) +- goto err_mem; +- if (!aspeed_video_alloc_buf(video, &video->srcs[1], size)) +- goto err_mem; +- +- v4l2_dbg(1, debug, &video->v4l2_dev, "src buf0 addr(%pad) size(%d)\n", +- &video->srcs[0].dma, video->srcs[0].size); +- v4l2_dbg(1, debug, &video->v4l2_dev, "src buf1 addr(%pad) size(%d)\n", +- &video->srcs[1].dma, video->srcs[1].size); +- aspeed_video_write(video, VE_SRC0_ADDR, video->srcs[0].dma); +- aspeed_video_write(video, VE_SRC1_ADDR, video->srcs[1].dma); ++ ctrl); + } + +- return; ++ aspeed_video_write(video, VE_SRC0_ADDR, _make_addr(video->srcs[0].dma)); ++ aspeed_video_write(video, VE_SRC1_ADDR, _make_addr(video->srcs[1].dma)); ++} + +-err_mem: +- dev_err(video->dev, "Failed to allocate source buffers\n"); ++/* ++ * Update relative parameters when timing changed. ++ * ++ * @video: the struct of aspeed_video ++ * @timings: the new timings ++ */ ++static void aspeed_video_update_timings(struct aspeed_video *video, struct v4l2_bt_timings *timings) ++{ ++ video->active_timings = *timings; ++ aspeed_video_set_resolution(video); + +- if (video->srcs[0].size) +- aspeed_video_free_buf(video, &video->srcs[0]); ++ video->pix_fmt.width = timings->width; ++ video->pix_fmt.height = timings->height; ++ video->pix_fmt.sizeimage = video->max_compressed_size; + } + + static void aspeed_video_update_regs(struct aspeed_video *video) +@@ -1219,6 +1688,8 @@ + u32 ctrl = 0; + u32 seq_ctrl = 0; + ++ v4l2_dbg(1, debug, &video->v4l2_dev, "input(%s)\n", ++ input_str[video->input]); + v4l2_dbg(1, debug, &video->v4l2_dev, "framerate(%d)\n", + video->frame_rate); + v4l2_dbg(1, debug, &video->v4l2_dev, "jpeg format(%s) subsample(%s)\n", +@@ -1234,14 +1705,26 @@ + else + aspeed_video_update(video, VE_BCD_CTRL, VE_BCD_CTRL_EN_BCD, 0); + ++ if (video->input == VIDEO_INPUT_VGA) ++ ctrl |= VE_CTRL_AUTO_OR_CURSOR; ++ ++ if (video->input == VIDEO_INPUT_DVI) ++ ctrl |= VE_CTRL_SOURCE; ++ + if (video->frame_rate) + ctrl |= FIELD_PREP(VE_CTRL_FRC, video->frame_rate); + ++ if (video->format == VIDEO_FMT_PARTIAL) ++ ctrl |= video->compare_only; ++ + if (video->format == VIDEO_FMT_STANDARD) { + comp_ctrl &= ~FIELD_PREP(VE_COMP_CTRL_EN_HQ, video->hq_mode); + seq_ctrl |= video->jpeg_mode; + } + ++ if (video->format != VIDEO_FMT_PARTIAL) ++ seq_ctrl |= VE_SEQ_CTRL_AUTO_COMP; ++ + if (video->yuv420) + seq_ctrl |= VE_SEQ_CTRL_YUV420; + +@@ -1252,7 +1735,9 @@ + aspeed_video_update(video, VE_SEQ_CTRL, + video->jpeg_mode | VE_SEQ_CTRL_YUV420, + seq_ctrl); +- aspeed_video_update(video, VE_CTRL, VE_CTRL_FRC, ctrl); ++ aspeed_video_update(video, VE_CTRL, ++ VE_CTRL_FRC | VE_CTRL_AUTO_OR_CURSOR | ++ VE_CTRL_SOURCE, ctrl); + aspeed_video_update(video, VE_COMP_CTRL, + VE_COMP_CTRL_DCT_LUM | VE_COMP_CTRL_DCT_CHR | + VE_COMP_CTRL_EN_HQ | VE_COMP_CTRL_HQ_DCT_LUM | +@@ -1278,8 +1763,16 @@ + aspeed_video_write(video, VE_COMP_OFFSET, 0); + + aspeed_video_write(video, 
VE_JPEG_ADDR, video->jpeg.dma); ++ aspeed_video_write(video, VE_BCD_ADDR, _make_addr(video->bcd.dma)); + + /* Set control registers */ ++ aspeed_video_write(video, VE_SEQ_CTRL, VE_SEQ_CTRL_AUTO_COMP); ++ if (video->version == 7) { ++ if (video->input == VIDEO_INPUT_DVI) ++ ctrl |= FIELD_PREP(VE_CTRL_CLK_DELAY, VIDEO_CLK_CRT2); ++ else ++ ctrl |= FIELD_PREP(VE_CTRL_CLK_DELAY, VIDEO_CLK_48MHz); ++ } + aspeed_video_write(video, VE_CTRL, ctrl); + aspeed_video_write(video, VE_COMP_CTRL, VE_COMP_CTRL_RSVD); + +@@ -1311,12 +1804,7 @@ + aspeed_video_get_resolution(video); + + /* Set timings since the device is being opened for the first time */ +- video->active_timings = video->detected_timings; +- aspeed_video_set_resolution(video); +- +- video->pix_fmt.width = video->active_timings.width; +- video->pix_fmt.height = video->active_timings.height; +- video->pix_fmt.sizeimage = video->max_compressed_size; ++ aspeed_video_update_timings(video, &video->detected_timings); + } + + static void aspeed_video_stop(struct aspeed_video *video) +@@ -1326,15 +1814,6 @@ + + aspeed_video_off(video); + +- if (video->srcs[0].size) +- aspeed_video_free_buf(video, &video->srcs[0]); +- +- if (video->srcs[1].size) +- aspeed_video_free_buf(video, &video->srcs[1]); +- +- if (video->bcd.size) +- aspeed_video_free_buf(video, &video->bcd); +- + video->v4l2_input_status = V4L2_IN_ST_NO_SIGNAL; + video->flags = 0; + } +@@ -1342,10 +1821,14 @@ + static int aspeed_video_querycap(struct file *file, void *fh, + struct v4l2_capability *cap) + { ++ struct aspeed_video *video = video_drvdata(file); ++ + strscpy(cap->driver, DEVICE_NAME, sizeof(cap->driver)); + strscpy(cap->card, "Aspeed Video Engine", sizeof(cap->card)); + snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s", + DEVICE_NAME); ++ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform: %s%d", ++ DEVICE_NAME, video->id); + + return 0; + } +@@ -1383,7 +1866,8 @@ + + switch (f->fmt.pix.pixelformat) { + case V4L2_PIX_FMT_JPEG: +- video->format = VIDEO_FMT_STANDARD; ++ video->format = (f->fmt.pix.flags == V4L2_PIX_FMT_FLAG_PARTIAL_JPG) ++ ? 
VIDEO_FMT_PARTIAL : VIDEO_FMT_STANDARD; + break; + case V4L2_PIX_FMT_AJPG: + video->format = VIDEO_FMT_ASPEED; +@@ -1401,10 +1885,10 @@ + { + struct aspeed_video *video = video_drvdata(file); + +- if (inp->index) ++ if (inp->index >= VIDEO_INPUT_MAX) + return -EINVAL; + +- strscpy(inp->name, "Host VGA capture", sizeof(inp->name)); ++ sprintf(inp->name, "%s capture", input_str[inp->index]); + inp->type = V4L2_INPUT_TYPE_CAMERA; + inp->capabilities = V4L2_IN_CAP_DV_TIMINGS; + inp->status = video->v4l2_input_status; +@@ -1414,15 +1898,116 @@ + + static int aspeed_video_get_input(struct file *file, void *fh, unsigned int *i) + { +- *i = 0; ++ struct aspeed_video *video = video_drvdata(file); ++ ++ *i = video->input; + + return 0; + } + + static int aspeed_video_set_input(struct file *file, void *fh, unsigned int i) + { +- if (i) ++ struct aspeed_video *video = video_drvdata(file); ++ ++ if (i >= VIDEO_INPUT_MAX) ++ return -EINVAL; ++ ++ if (i == video->input) ++ return 0; ++ ++ if (vb2_is_busy(&video->queue)) ++ return -EBUSY; ++ ++ if (IS_ERR(video->scu)) { ++ v4l2_dbg(1, debug, &video->v4l2_dev, "%s: scu isn't ready for input-control\n", __func__); + return -EINVAL; ++ } ++ ++ if (IS_ERR(video->gfx) && i == VIDEO_INPUT_GFX) { ++ v4l2_dbg(1, debug, &video->v4l2_dev, "%s: gfx isn't ready for GFX input\n", __func__); ++ return -EINVAL; ++ } ++ ++ // prepare memory space for user to put test batch ++ if (i == VIDEO_INPUT_MEM && !video->dbg_src.size) { ++ if (!aspeed_video_alloc_buf(video, &video->dbg_src, VE_MAX_SRC_BUFFER_SIZE)) { ++ v4l2_err(&video->v4l2_dev, "Failed to allocate buffer for debug input\n"); ++ return -EINVAL; ++ } ++ v4l2_dbg(1, debug, &video->v4l2_dev, "dbg src addr(%pad) size(%d)\n", ++ &video->dbg_src.dma, video->dbg_src.size); ++ } ++ if (i != VIDEO_INPUT_MEM && video->dbg_src.size) ++ aspeed_video_free_buf(video, &video->dbg_src); ++ ++ if (i == VIDEO_INPUT_DVI && video->version == 7) { ++ if (IS_ERR(video->dvi_base)) { ++ v4l2_err(&video->v4l2_dev, "%s: dvi isn't ready for DVI input\n", __func__); ++ return -EINVAL; ++ } ++ ++ /* Set DVI mode detection defaults */ ++ writel(FIELD_PREP(VE_MODE_DT_HOR_TOLER, 2) | ++ FIELD_PREP(VE_MODE_DT_VER_TOLER, 2) | ++ FIELD_PREP(VE_MODE_DT_HOR_STABLE, 6) | ++ FIELD_PREP(VE_MODE_DT_VER_STABLE, 6) | ++ FIELD_PREP(VE_MODE_DT_EDG_THROD, 0x65), ++ video->dvi_base + VE_MODE_DETECT); ++ } ++ ++ video->input = i; ++ ++ if (video->version == 6) { ++ /* modify dpll source per current input */ ++ if (video->input == VIDEO_INPUT_VGA) ++ regmap_update_bits(video->scu, SCU_MISC_CTRL, SCU_DPLL_SOURCE, 0); ++ else ++ regmap_update_bits(video->scu, SCU_MISC_CTRL, SCU_DPLL_SOURCE, SCU_DPLL_SOURCE); ++ ++ // SLI direction: inverse if DVI ++ if (video->input == VIDEO_INPUT_DVI) { ++ regmap_update_bits(video->scu, SCU_MULTI_FUNC_12, ++ SCU_MULTI_FUNC_CPU_SLI_DIR, ++ SCU_MULTI_FUNC_CPU_SLI_DIR); ++ regmap_update_bits(video->scu, SCU_MULTI_FUNC_15, ++ SCU_MULTI_FUNC_IO_SLI_DIR, ++ SCU_MULTI_FUNC_IO_SLI_DIR); ++ regmap_update_bits(video->scu, SCU_CLK_SEL2, ++ SCU_VIDEO_OUTPUT_DELAY, ++ 4); ++ } else { ++ regmap_update_bits(video->scu, SCU_MULTI_FUNC_12, ++ SCU_MULTI_FUNC_CPU_SLI_DIR, ++ 0); ++ regmap_update_bits(video->scu, SCU_MULTI_FUNC_15, ++ SCU_MULTI_FUNC_IO_SLI_DIR, ++ 0); ++ } ++ } else if (video->version == 7) { ++ if (video->input == VIDEO_INPUT_DVI) { ++ // CRT2CLK = 500 * R / N ++ regmap_write(video->scu, SCU_CRT2CLK, ++ FIELD_PREP(SCU_CRT2CLK_N, 50) | FIELD_PREP(SCU_CRT2CLK_R, 15)); ++ ++ regmap_write(video->scu, SCU_CLK_SEL, 
FIELD_PREP(SCU_SOC_DISPLAY_SEL, 1)); ++ } else { ++ regmap_write(video->scu, SCU_CLK_SEL, FIELD_PREP(SCU_SOC_DISPLAY_SEL, 0)); ++ } ++ } ++ ++ aspeed_video_update_regs(video); ++ ++ // update signal status ++ if (video->input == VIDEO_INPUT_MEM) { ++ video->v4l2_input_status = 0; ++ } else { ++ aspeed_video_get_resolution(video); ++ if (!video->v4l2_input_status) ++ aspeed_video_update_timings(video, &video->detected_timings); ++ } ++ ++ if (video->input == VIDEO_INPUT_MEM) ++ aspeed_video_start_frame(video); + + return 0; + } +@@ -1520,6 +2105,12 @@ + { + struct aspeed_video *video = video_drvdata(file); + ++ // if input is MEM, resolution decided by user ++ if (video->input == VIDEO_INPUT_MEM) { ++ video->detected_timings.width = timings->bt.width; ++ video->detected_timings.height = timings->bt.height; ++ } ++ + if (timings->bt.width == video->active_timings.width && + timings->bt.height == video->active_timings.height) + return 0; +@@ -1527,13 +2118,7 @@ + if (vb2_is_busy(&video->queue)) + return -EBUSY; + +- video->active_timings = timings->bt; +- +- aspeed_video_set_resolution(video); +- +- video->pix_fmt.width = timings->bt.width; +- video->pix_fmt.height = timings->bt.height; +- video->pix_fmt.sizeimage = video->max_compressed_size; ++ aspeed_video_update_timings(video, &timings->bt); + + timings->type = V4L2_DV_BT_656_1120; + +@@ -1589,6 +2174,37 @@ + NULL, NULL); + } + ++static int aspeed_video_g_selection(struct file *file, void *fh, ++ struct v4l2_selection *s) ++{ ++ struct aspeed_video *video = video_drvdata(file); ++ struct aspeed_video_box *box; ++ unsigned long flags; ++ ++ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE && ++ s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) ++ return -EINVAL; ++ ++ switch (s->target) { ++ case V4L2_SEL_TGT_CROP_DEFAULT: ++ spin_lock_irqsave(&video->lock, flags); ++ box = list_first_entry_or_null(&video->boxes, ++ struct aspeed_video_box, ++ link); ++ if (box) { ++ s->r = box->box; ++ list_del(&box->link); ++ kfree(box); ++ } else { ++ memset(&s->r, 0, sizeof(s->r)); ++ } ++ spin_unlock_irqrestore(&video->lock, flags); ++ return 0; ++ } ++ ++ return -EINVAL; ++} ++ + static int aspeed_video_dv_timings_cap(struct file *file, void *fh, + struct v4l2_dv_timings_cap *cap) + { +@@ -1641,6 +2257,8 @@ + .vidioc_enum_dv_timings = aspeed_video_enum_dv_timings, + .vidioc_dv_timings_cap = aspeed_video_dv_timings_cap, + ++ .vidioc_g_selection = aspeed_video_g_selection, ++ + .vidioc_subscribe_event = aspeed_video_sub_event, + .vidioc_unsubscribe_event = v4l2_event_unsubscribe, + }; +@@ -1710,6 +2328,9 @@ + struct delayed_work *dwork = to_delayed_work(work); + struct aspeed_video *video = container_of(dwork, struct aspeed_video, + res_work); ++ bool is_res_chg = false; ++ ++ aspeed_video_reset(video); + + aspeed_video_on(video); + +@@ -1723,8 +2344,14 @@ + + aspeed_video_get_resolution(video); + +- if (video->detected_timings.width != video->active_timings.width || +- video->detected_timings.height != video->active_timings.height) { ++ if (video->v4l2_input_status) ++ goto done; ++ ++ is_res_chg = (video->detected_timings.width != video->active_timings.width || ++ video->detected_timings.height != video->active_timings.height); ++ aspeed_video_update_timings(video, &video->detected_timings); ++ ++ if (is_res_chg) { + static const struct v4l2_event ev = { + .type = V4L2_EVENT_SOURCE_CHANGE, + .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION, +@@ -1740,6 +2367,32 @@ + done: + clear_bit(VIDEO_RES_CHANGE, &video->flags); + wake_up_interruptible_all(&video->wait); ++ 
wake_up_all(&waitq); ++} ++ ++/* ++ * To mmap source memory for test from memory usage. ++ * test from memory input mode requires much bigger size because it is ++ * uncompressed BGRA format. Thus, We use VM_READ to tell it is for test ++ * or v4l2 now. ++ */ ++static int aspeed_video_mmap(struct file *file, struct vm_area_struct *vma) ++{ ++ int rc; ++ struct aspeed_video *v = video_drvdata(file); ++ const size_t size = vma->vm_end - vma->vm_start; ++ const unsigned long pfn = __phys_to_pfn(v->dbg_src.dma); ++ ++ if (v->input != VIDEO_INPUT_MEM || vma->vm_flags & VM_READ) ++ return vb2_fop_mmap(file, vma); ++ ++ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); ++ rc = remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot); ++ if (rc) { ++ v4l2_err(&v->v4l2_dev, "remap_pfn_range failed(%d)\n", rc); ++ return -EAGAIN; ++ } ++ return 0; + } + + static int aspeed_video_open(struct file *file) +@@ -1785,7 +2438,7 @@ + .read = vb2_fop_read, + .poll = vb2_fop_poll, + .unlocked_ioctl = video_ioctl2, +- .mmap = vb2_fop_mmap, ++ .mmap = aspeed_video_mmap, + .open = aspeed_video_open, + .release = aspeed_video_release, + }; +@@ -1830,13 +2483,17 @@ + video->sequence = 0; + video->perf.duration_max = 0; + video->perf.duration_min = 0xffffffff; ++ set_bit(VIDEO_BOUNDING_BOX, &video->flags); + + aspeed_video_update_regs(video); + +- rc = aspeed_video_start_frame(video); +- if (rc) { +- aspeed_video_bufs_done(video, VB2_BUF_STATE_QUEUED); +- return rc; ++ // if input is MEM, don't start capture until user acquire ++ if (video->input != VIDEO_INPUT_MEM) { ++ rc = aspeed_video_start_frame(video); ++ if (rc) { ++ aspeed_video_bufs_done(video, VB2_BUF_STATE_QUEUED); ++ return rc; ++ } + } + + set_bit(VIDEO_STREAMING, &video->flags); +@@ -1860,8 +2517,7 @@ + * Need to force stop any DMA and try and get HW into a good + * state for future calls to start streaming again. + */ +- aspeed_video_off(video); +- aspeed_video_on(video); ++ aspeed_video_reset(video); + + aspeed_video_init_regs(video); + +@@ -1885,7 +2541,8 @@ + spin_unlock_irqrestore(&video->lock, flags); + + if (test_bit(VIDEO_STREAMING, &video->flags) && +- !test_bit(VIDEO_FRAME_INPRG, &video->flags) && empty) ++ !test_bit(VIDEO_FRAME_INPRG, &video->flags) && empty && ++ (video->input != VIDEO_INPUT_MEM)) + aspeed_video_start_frame(video); + } + +@@ -1911,6 +2568,7 @@ + val08 = aspeed_video_read(v, VE_CTRL); + if (FIELD_GET(VE_CTRL_DIRECT_FETCH, val08)) { + seq_printf(s, " %-20s:\tDirect fetch\n", "Mode"); ++ seq_printf(s, " %-20s:\t%s\n", "Input", input_str[v->input]); + seq_printf(s, " %-20s:\t%s\n", "VGA bpp mode", + FIELD_GET(VE_CTRL_INT_DE, val08) ? 
"16" : "32"); + } else { +@@ -1962,19 +2620,19 @@ + } + DEFINE_SHOW_ATTRIBUTE(aspeed_video_debugfs); + +-static struct dentry *debugfs_entry; +- + static void aspeed_video_debugfs_remove(struct aspeed_video *video) + { +- debugfs_remove_recursive(debugfs_entry); +- debugfs_entry = NULL; ++ debugfs_remove_recursive(video->debugfs_entry); + } + + static void aspeed_video_debugfs_create(struct aspeed_video *video) + { +- debugfs_entry = debugfs_create_file(DEVICE_NAME, 0444, NULL, +- video, +- &aspeed_video_debugfs_fops); ++ char filename[16]; ++ ++ snprintf(filename, sizeof(filename), "%s%d", DEVICE_NAME, video->id); ++ video->debugfs_entry = debugfs_create_file(filename, 0444, ++ video->debugfs_entry, video, ++ &aspeed_video_debugfs_fops); + } + #else + static void aspeed_video_debugfs_remove(struct aspeed_video *video) { } +@@ -2028,6 +2686,8 @@ + vbq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + vbq->io_modes = VB2_MMAP | VB2_READ | VB2_DMABUF; + vbq->dev = v4l2_dev->dev; ++ snprintf(vdev->name, sizeof(vdev->name), "%s%d", ++ DEVICE_NAME, video->id); + vbq->lock = &video->video_lock; + vbq->ops = &aspeed_video_vb2_ops; + vbq->mem_ops = &vb2_dma_contig_memops; +@@ -2070,11 +2730,30 @@ + return 0; + } + ++/* ++ * Get regmap without checking res, such as clk/reset, that could lead to ++ * conflict. ++ */ ++static struct regmap *aspeed_regmap_lookup(struct device_node *np, const char *property) ++{ ++ struct device_node *syscon_np __free(device_node) = of_parse_phandle(np, property, 0); ++ ++ if (!syscon_np) ++ return ERR_PTR(-ENODEV); ++ ++ return device_node_to_regmap(syscon_np); ++} ++ + static int aspeed_video_init(struct aspeed_video *video) + { + int irq; + int rc; + struct device *dev = video->dev; ++ unsigned int mask_size = (video->version >= 7) ? 64 : 32; ++ u32 resv_size = VE_MAX_SRC_BUFFER_SIZE * 2 + VE_JPEG_HEADER_SIZE + VE_BCD_BUFF_SIZE; ++ ++ video->scu = aspeed_regmap_lookup(dev->of_node, "aspeed,scu"); ++ video->gfx = aspeed_regmap_lookup(dev->of_node, "aspeed,gfx"); + + irq = irq_of_parse_and_map(dev->of_node, 0); + if (!irq) { +@@ -2082,14 +2761,36 @@ + return -ENODEV; + } + +- rc = devm_request_threaded_irq(dev, irq, NULL, aspeed_video_irq, +- IRQF_ONESHOT, DEVICE_NAME, video); ++ rc = devm_request_threaded_irq(dev, irq, aspeed_video_irq, ++ aspeed_video_thread_irq, ++ IRQF_ONESHOT, dev_name(dev), video); + if (rc < 0) { + dev_err(dev, "Unable to request IRQ %d\n", irq); + return rc; + } + dev_info(video->dev, "irq %d\n", irq); + ++ if (!IS_ERR(video->dvi_base)) { ++ irq = irq_of_parse_and_map(dev->of_node, 1); ++ if (!irq) { ++ dev_err(dev, "Unable to find DVI IRQ\n"); ++ return -ENODEV; ++ } ++ ++ rc = devm_request_irq(dev, irq, aspeed_video_md_irq, 0, dev_name(dev), video); ++ if (rc < 0) { ++ dev_err(dev, "Unable to request DVI IRQ %d\n", irq); ++ return rc; ++ } ++ dev_info(video->dev, "dvi mode-detection irq %d\n", irq); ++ } ++ ++ video->reset = devm_reset_control_get_shared(dev, NULL); ++ if (IS_ERR(video->reset)) { ++ dev_err(dev, "Unable to get reset\n"); ++ return PTR_ERR(video->reset); ++ } ++ + video->eclk = devm_clk_get(dev, "eclk"); + if (IS_ERR(video->eclk)) { + dev_err(dev, "Unable to get ECLK\n"); +@@ -2111,22 +2812,53 @@ + if (rc) + goto err_unprepare_eclk; + ++ if (video->version > 6) { ++ video->crt2clk = devm_clk_get(dev, "crt2clk"); ++ if (IS_ERR(video->crt2clk)) { ++ dev_err(dev, "Unable to get CRT2CLK\n"); ++ rc = PTR_ERR(video->crt2clk); ++ goto err_unprepare_vclk; ++ } ++ ++ rc = clk_prepare_enable(video->crt2clk); ++ if (rc) ++ goto err_unprepare_vclk; 
++ } ++ + of_reserved_mem_device_init(dev); + +- rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); ++ rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(mask_size)); + if (rc) { + dev_err(dev, "Failed to set DMA mask\n"); + goto err_release_reserved_mem; + } + +- if (!aspeed_video_alloc_buf(video, &video->jpeg, +- VE_JPEG_HEADER_SIZE)) { +- dev_err(dev, "Failed to allocate DMA for JPEG header\n"); ++ if (!aspeed_video_alloc_buf(video, &video->pool, resv_size)) { ++ dev_err(dev, "Failed to allocate DMA pool\n"); + rc = -ENOMEM; + goto err_release_reserved_mem; + } +- dev_info(video->dev, "alloc mem size(%d) at %pad for jpeg header\n", +- VE_JPEG_HEADER_SIZE, &video->jpeg.dma); ++ video->jpeg.size = VE_JPEG_HEADER_SIZE; ++ video->jpeg.virt = video->pool.virt; ++ video->jpeg.dma = video->pool.dma; ++ video->bcd.size = VE_BCD_BUFF_SIZE; ++ video->bcd.virt = video->jpeg.virt + video->jpeg.size; ++ video->bcd.dma = video->jpeg.dma + video->jpeg.size; ++ video->srcs[0].size = VE_MAX_SRC_BUFFER_SIZE; ++ video->srcs[0].dma = video->bcd.dma + video->bcd.size; ++ video->srcs[1].size = VE_MAX_SRC_BUFFER_SIZE; ++ video->srcs[1].dma = video->srcs[0].dma + video->srcs[0].size; ++ ++ dev_info(video->dev, "alloc mem size(%d) at %pad for pool\n", ++ resv_size, &video->pool.dma); ++ v4l2_dbg(1, debug, &video->v4l2_dev, "jpeg header addr(%pad) size(%d)\n", ++ &video->jpeg.dma, video->jpeg.size); ++ v4l2_dbg(1, debug, &video->v4l2_dev, "bcd addr(%pad) size(%d)\n", ++ &video->bcd.dma, video->bcd.size); ++ v4l2_dbg(1, debug, &video->v4l2_dev, "src buf0 addr(%pad) size(%d)\n", ++ &video->srcs[0].dma, video->srcs[0].size); ++ v4l2_dbg(1, debug, &video->v4l2_dev, "src buf1 addr(%pad) size(%d)\n", ++ &video->srcs[1].dma, video->srcs[1].size); + + aspeed_video_init_jpeg_table(video->jpeg.virt, video->yuv420); + +@@ -2134,6 +2866,9 @@ + + err_release_reserved_mem: + of_reserved_mem_device_release(dev); ++ if (!IS_ERR(video->crt2clk)) ++ clk_disable_unprepare(video->crt2clk); ++err_unprepare_vclk: + clk_unprepare(video->vclk); + err_unprepare_eclk: + clk_unprepare(video->eclk); +@@ -2145,6 +2880,7 @@ + { .compatible = "aspeed,ast2400-video-engine", .data = &ast2400_config }, + { .compatible = "aspeed,ast2500-video-engine", .data = &ast2500_config }, + { .compatible = "aspeed,ast2600-video-engine", .data = &ast2600_config }, ++ { .compatible = "aspeed,ast2700-video-engine", .data = &ast2700_config }, + {} + }; + MODULE_DEVICE_TABLE(of, aspeed_video_of_match); +@@ -2159,6 +2895,10 @@ + if (!video) + return -ENOMEM; + ++ video->id = of_alias_get_id(pdev->dev.of_node, "video"); ++ if (video->id < 0) ++ video->id = 0; ++ + video->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(video->base)) + return PTR_ERR(video->base); +@@ -2167,8 +2907,10 @@ + if (!config) + return -ENODEV; + ++ video->version = config->version; + video->jpeg_mode = config->jpeg_mode; + video->comp_size_read = config->comp_size_read; ++ video->compare_only = config->compare_only; + + video->frame_rate = 30; + video->jpeg_hq_quality = 1; +@@ -2178,6 +2920,19 @@ + init_waitqueue_head(&video->wait); + INIT_DELAYED_WORK(&video->res_work, aspeed_video_resolution_work); + INIT_LIST_HEAD(&video->buffers); ++ INIT_LIST_HEAD(&video->boxes); ++ ++ video->rst_wq = create_singlethread_workqueue("video_rst_wq"); ++ if (!video->rst_wq) { ++ dev_err(&pdev->dev, "unable to alloc rst workqueue\n"); ++ return -ENOMEM; ++ } ++ INIT_WORK(&video->rst_work, aspeed_video_rst_worker); ++ ++ if (video->version == 7 && video->id == 0) ++ video->dvi_base = 
devm_platform_ioremap_resource(pdev, 1); ++ else ++ video->dvi_base = ERR_PTR(-ENODEV); + + rc = aspeed_video_init(video); + if (rc) +@@ -2185,7 +2940,6 @@ + + rc = aspeed_video_setup_video(video); + if (rc) { +- aspeed_video_free_buf(video, &video->jpeg); + clk_unprepare(video->vclk); + clk_unprepare(video->eclk); + return rc; +@@ -2193,6 +2947,9 @@ + + aspeed_video_debugfs_create(video); + ++ dev_info(video->dev, "%s%d registered as /dev/video%d\n", DEVICE_NAME, ++ video->id, video->vdev.num); ++ + return 0; + } + +@@ -2204,8 +2961,12 @@ + + aspeed_video_off(video); + ++ destroy_workqueue(video->rst_wq); ++ + aspeed_video_debugfs_remove(video); + ++ if (!IS_ERR(video->crt2clk)) ++ clk_disable_unprepare(video->crt2clk); + clk_unprepare(video->vclk); + clk_unprepare(video->eclk); + +@@ -2215,7 +2976,7 @@ + + v4l2_device_unregister(v4l2_dev); + +- aspeed_video_free_buf(video, &video->jpeg); ++ aspeed_video_free_buf(video, &video->pool); + + of_reserved_mem_device_release(dev); + } +diff --git a/drivers/mmc/host/sdhci-of-aspeed.c b/drivers/mmc/host/sdhci-of-aspeed.c +--- a/drivers/mmc/host/sdhci-of-aspeed.c 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/mmc/host/sdhci-of-aspeed.c 2025-12-23 10:16:20.668040312 +0000 +@@ -11,8 +11,10 @@ + #include + #include + #include ++#include + #include + #include ++#include + #include + + #include "sdhci-pltfm.h" +@@ -23,6 +25,8 @@ + #define ASPEED_SDC_PHASE 0xf4 + #define ASPEED_SDC_S1_PHASE_IN GENMASK(25, 21) + #define ASPEED_SDC_S0_PHASE_IN GENMASK(20, 16) ++#define ASPEED_SDC_S0_PHASE_IN_SHIFT 16 ++#define ASPEED_SDC_S0_PHASE_OUT_SHIFT 3 + #define ASPEED_SDC_S1_PHASE_OUT GENMASK(15, 11) + #define ASPEED_SDC_S1_PHASE_IN_EN BIT(10) + #define ASPEED_SDC_S1_PHASE_OUT_EN GENMASK(9, 8) +@@ -31,61 +35,52 @@ + #define ASPEED_SDC_S0_PHASE_OUT_EN GENMASK(1, 0) + #define ASPEED_SDC_PHASE_MAX 31 + ++#define ASPEED_SDHCI_TAP_PARAM_INVERT_CLK BIT(4) ++#define ASPEED_SDHCI_NR_TAPS 15 ++ + /* SDIO{10,20} */ +-#define ASPEED_SDC_CAP1_1_8V (0 * 32 + 26) ++#define ASPEED_SDC_CAP1_1_8V (0 * 32 + 26) + /* SDIO{14,24} */ +-#define ASPEED_SDC_CAP2_SDR104 (1 * 32 + 1) ++#define ASPEED_SDC_CAP2_SDR104 (1 * 32 + 1) ++ ++#define PROBE_AFTER_ASSET_DEASSERT 0x1 ++ ++struct aspeed_sdc_info { ++ u32 flag; ++}; + + struct aspeed_sdc { + struct clk *clk; + struct resource *res; ++ struct reset_control *rst; + + spinlock_t lock; + void __iomem *regs; + }; + +-struct aspeed_sdhci_tap_param { +- bool valid; +- +-#define ASPEED_SDHCI_TAP_PARAM_INVERT_CLK BIT(4) +- u8 in; +- u8 out; +-}; +- +-struct aspeed_sdhci_tap_desc { +- u32 tap_mask; +- u32 enable_mask; +- u8 enable_value; +-}; +- +-struct aspeed_sdhci_phase_desc { +- struct aspeed_sdhci_tap_desc in; +- struct aspeed_sdhci_tap_desc out; +-}; +- + struct aspeed_sdhci_pdata { + unsigned int clk_div_start; +- const struct aspeed_sdhci_phase_desc *phase_desc; +- size_t nr_phase_descs; + }; + + struct aspeed_sdhci { + const struct aspeed_sdhci_pdata *pdata; + struct aspeed_sdc *parent; + u32 width_mask; +- struct mmc_clk_phase_map phase_map; +- const struct aspeed_sdhci_phase_desc *phase_desc; ++}; ++ ++static struct aspeed_sdc_info ast2600_sdc_info = { ++ .flag = PROBE_AFTER_ASSET_DEASSERT + }; + + /* + * The function sets the mirror register for updating + * capbilities of the current slot. 
+ * +- * slot | capability | caps_reg | mirror_reg ++ * slot | capability | caps_reg | mirror_reg + * -----|-------------|----------|------------ +- * 0 | CAP1_1_8V | SDIO140 | SDIO10 ++ * 0 | CAP1_1_8V | SDIO140 | SDIO10 + * 0 | CAP2_SDR104 | SDIO144 | SDIO14 +- * 1 | CAP1_1_8V | SDIO240 | SDIO20 ++ * 1 | CAP1_1_8V | SDIO240 | SDIO20 + * 1 | CAP2_SDR104 | SDIO244 | SDIO24 + */ + static void aspeed_sdc_set_slot_capability(struct sdhci_host *host, struct aspeed_sdc *sdc, +@@ -125,222 +120,217 @@ + spin_unlock(&sdc->lock); + } + +-static u32 +-aspeed_sdc_set_phase_tap(const struct aspeed_sdhci_tap_desc *desc, +- u8 tap, bool enable, u32 reg) +-{ +- reg &= ~(desc->enable_mask | desc->tap_mask); +- if (enable) { +- reg |= tap << __ffs(desc->tap_mask); +- reg |= desc->enable_value << __ffs(desc->enable_mask); +- } +- +- return reg; +-} +- +-static void +-aspeed_sdc_set_phase_taps(struct aspeed_sdc *sdc, +- const struct aspeed_sdhci_phase_desc *desc, +- const struct aspeed_sdhci_tap_param *taps) ++static void aspeed_sdhci_set_bus_width(struct sdhci_host *host, int width) + { +- u32 reg; ++ struct sdhci_pltfm_host *pltfm_priv; ++ struct aspeed_sdhci *aspeed_sdhci; ++ struct aspeed_sdc *aspeed_sdc; ++ u8 ctrl; + +- spin_lock(&sdc->lock); +- reg = readl(sdc->regs + ASPEED_SDC_PHASE); ++ pltfm_priv = sdhci_priv(host); ++ aspeed_sdhci = sdhci_pltfm_priv(pltfm_priv); ++ aspeed_sdc = aspeed_sdhci->parent; + +- reg = aspeed_sdc_set_phase_tap(&desc->in, taps->in, taps->valid, reg); +- reg = aspeed_sdc_set_phase_tap(&desc->out, taps->out, taps->valid, reg); ++ /* Set/clear 8-bit mode */ ++ aspeed_sdc_configure_8bit_mode(aspeed_sdc, aspeed_sdhci, ++ width == MMC_BUS_WIDTH_8); + +- writel(reg, sdc->regs + ASPEED_SDC_PHASE); +- spin_unlock(&sdc->lock); ++ /* Set/clear 1 or 4 bit mode */ ++ ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); ++ if (width == MMC_BUS_WIDTH_4) ++ ctrl |= SDHCI_CTRL_4BITBUS; ++ else ++ ctrl &= ~SDHCI_CTRL_4BITBUS; ++ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); + } + +-#define PICOSECONDS_PER_SECOND 1000000000000ULL +-#define ASPEED_SDHCI_NR_TAPS 15 +-/* Measured value with *handwave* environmentals and static loading */ +-#define ASPEED_SDHCI_MAX_TAP_DELAY_PS 1253 +-static int aspeed_sdhci_phase_to_tap(struct device *dev, unsigned long rate_hz, +- int phase_deg) +-{ +- u64 phase_period_ps; +- u64 prop_delay_ps; +- u64 clk_period_ps; +- unsigned int tap; +- u8 inverted; +- +- phase_deg %= 360; +- +- if (phase_deg >= 180) { +- inverted = ASPEED_SDHCI_TAP_PARAM_INVERT_CLK; +- phase_deg -= 180; +- dev_dbg(dev, +- "Inverting clock to reduce phase correction from %d to %d degrees\n", +- phase_deg + 180, phase_deg); +- } else { +- inverted = 0; +- } ++static u32 aspeed_sdhci_readl(struct sdhci_host *host, int reg) ++{ ++ u32 val = readl(host->ioaddr + reg); + +- prop_delay_ps = ASPEED_SDHCI_MAX_TAP_DELAY_PS / ASPEED_SDHCI_NR_TAPS; +- clk_period_ps = div_u64(PICOSECONDS_PER_SECOND, (u64)rate_hz); +- phase_period_ps = div_u64((u64)phase_deg * clk_period_ps, 360ULL); +- +- tap = div_u64(phase_period_ps, prop_delay_ps); +- if (tap > ASPEED_SDHCI_NR_TAPS) { +- dev_dbg(dev, +- "Requested out of range phase tap %d for %d degrees of phase compensation at %luHz, clamping to tap %d\n", +- tap, phase_deg, rate_hz, ASPEED_SDHCI_NR_TAPS); +- tap = ASPEED_SDHCI_NR_TAPS; +- } ++ if (unlikely(reg == SDHCI_PRESENT_STATE) && ++ (host->mmc->caps2 & MMC_CAP2_CD_ACTIVE_HIGH)) ++ val ^= SDHCI_CARD_PRESENT; + +- return inverted | tap; ++ return val; + } + +-static void +-aspeed_sdhci_phases_to_taps(struct device 
*dev, unsigned long rate, +- const struct mmc_clk_phase *phases, +- struct aspeed_sdhci_tap_param *taps) ++static void aspeed_sdhci_reset(struct sdhci_host *host, u8 mask) + { +- taps->valid = phases->valid; ++ struct sdhci_pltfm_host *pltfm_priv; ++ struct aspeed_sdhci *aspeed_sdhci; ++ struct aspeed_sdc *aspeed_sdc; ++ u32 save_array[8]; ++ u32 reg_array[] = {SDHCI_DMA_ADDRESS, ++ SDHCI_BLOCK_SIZE, ++ SDHCI_ARGUMENT, ++ SDHCI_HOST_CONTROL, ++ SDHCI_CLOCK_CONTROL, ++ SDHCI_INT_ENABLE, ++ SDHCI_SIGNAL_ENABLE, ++ SDHCI_AUTO_CMD_STATUS}; ++ int i; ++ u16 tran_mode; ++ u32 mmc8_mode; + +- if (!phases->valid) +- return; ++ pltfm_priv = sdhci_priv(host); ++ aspeed_sdhci = sdhci_pltfm_priv(pltfm_priv); ++ aspeed_sdc = aspeed_sdhci->parent; + +- taps->in = aspeed_sdhci_phase_to_tap(dev, rate, phases->in_deg); +- taps->out = aspeed_sdhci_phase_to_tap(dev, rate, phases->out_deg); +-} ++ if (!IS_ERR(aspeed_sdc->rst)) { ++ for (i = 0; i < ARRAY_SIZE(reg_array); i++) ++ save_array[i] = sdhci_readl(host, reg_array[i]); + +-static void +-aspeed_sdhci_configure_phase(struct sdhci_host *host, unsigned long rate) +-{ +- struct aspeed_sdhci_tap_param _taps = {0}, *taps = &_taps; +- struct mmc_clk_phase *params; +- struct aspeed_sdhci *sdhci; +- struct device *dev; ++ tran_mode = sdhci_readw(host, SDHCI_TRANSFER_MODE); ++ mmc8_mode = readl(aspeed_sdc->regs); + +- dev = mmc_dev(host->mmc); +- sdhci = sdhci_pltfm_priv(sdhci_priv(host)); ++ reset_control_assert(aspeed_sdc->rst); ++ mdelay(1); ++ reset_control_deassert(aspeed_sdc->rst); ++ mdelay(1); + +- if (!sdhci->phase_desc) +- return; ++ for (i = 0; i < ARRAY_SIZE(reg_array); i++) ++ sdhci_writel(host, save_array[i], reg_array[i]); ++ ++ sdhci_writew(host, tran_mode, SDHCI_TRANSFER_MODE); ++ writel(mmc8_mode, aspeed_sdc->regs); + +- params = &sdhci->phase_map.phase[host->timing]; +- aspeed_sdhci_phases_to_taps(dev, rate, params, taps); +- aspeed_sdc_set_phase_taps(sdhci->parent, sdhci->phase_desc, taps); +- dev_dbg(dev, +- "Using taps [%d, %d] for [%d, %d] degrees of phase correction at %luHz (%d)\n", +- taps->in & ASPEED_SDHCI_NR_TAPS, +- taps->out & ASPEED_SDHCI_NR_TAPS, +- params->in_deg, params->out_deg, rate, host->timing); ++ sdhci_set_clock(host, host->clock); ++ } ++ ++ sdhci_reset(host, mask); + } + +-static void aspeed_sdhci_set_clock(struct sdhci_host *host, unsigned int clock) ++static int aspeed_sdhci_execute_tuning(struct sdhci_host *host, u32 opcode) + { +- struct sdhci_pltfm_host *pltfm_host; +- unsigned long parent, bus; ++ struct sdhci_pltfm_host *pltfm_priv; + struct aspeed_sdhci *sdhci; +- int div; +- u16 clk; +- +- pltfm_host = sdhci_priv(host); +- sdhci = sdhci_pltfm_priv(pltfm_host); ++ struct aspeed_sdc *sdc; ++ struct device *dev; + +- parent = clk_get_rate(pltfm_host->clk); ++ u32 val, left, right, edge; ++ u32 window, oldwindow = 0, center; ++ u32 in_phase, out_phase, enable_mask, inverted = 0; + +- sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); ++ dev = mmc_dev(host->mmc); ++ pltfm_priv = sdhci_priv(host); ++ sdhci = sdhci_pltfm_priv(pltfm_priv); ++ sdc = sdhci->parent; + +- if (clock == 0) +- return; ++ out_phase = readl(sdc->regs + ASPEED_SDC_PHASE) & ASPEED_SDC_S0_PHASE_OUT; + +- if (WARN_ON(clock > host->max_clk)) +- clock = host->max_clk; ++ enable_mask = ASPEED_SDC_S0_PHASE_OUT_EN | ASPEED_SDC_S0_PHASE_IN_EN; + + /* +- * Regarding the AST2600: +- * +- * If (EMMC12C[7:6], EMMC12C[15:8] == 0) then +- * period of SDCLK = period of SDMCLK. 
+- * +- * If (EMMC12C[7:6], EMMC12C[15:8] != 0) then +- * period of SDCLK = period of SDMCLK * 2 * (EMMC12C[7:6], EMMC[15:8]) +- * +- * If you keep EMMC12C[7:6] = 0 and EMMC12C[15:8] as one-hot, +- * 0x1/0x2/0x4/etc, you will find it is compatible to AST2400 or AST2500 +- * +- * Keep the one-hot behaviour for backwards compatibility except for +- * supporting the value 0 in (EMMC12C[7:6], EMMC12C[15:8]), and capture +- * the 0-value capability in clk_div_start. ++ * There are two window upon clock rising and falling edge. ++ * Iterate each tap delay to find the valid window and choose the ++ * bigger one, set the tap delay at the middle of window. + */ +- for (div = sdhci->pdata->clk_div_start; div < 256; div *= 2) { +- bus = parent / div; +- if (bus <= clock) +- break; ++ for (edge = 0; edge < 2; edge++) { ++ if (edge == 1) ++ inverted = ASPEED_SDHCI_TAP_PARAM_INVERT_CLK; ++ ++ val = (out_phase | enable_mask | (inverted << ASPEED_SDC_S0_PHASE_IN_SHIFT)); ++ ++ /* find the left boundary */ ++ for (left = 0; left < ASPEED_SDHCI_NR_TAPS + 1; left++) { ++ in_phase = val | (left << ASPEED_SDC_S0_PHASE_IN_SHIFT); ++ writel(in_phase, sdc->regs + ASPEED_SDC_PHASE); ++ ++ if (!mmc_send_tuning(host->mmc, opcode, NULL)) ++ break; ++ } ++ ++ /* find the right boundary */ ++ for (right = left + 1; right < ASPEED_SDHCI_NR_TAPS + 1; right++) { ++ in_phase = val | (right << ASPEED_SDC_S0_PHASE_IN_SHIFT); ++ writel(in_phase, sdc->regs + ASPEED_SDC_PHASE); ++ ++ if (mmc_send_tuning(host->mmc, opcode, NULL)) ++ break; ++ } ++ ++ window = right - left; ++ pr_debug("tuning window[%d][%d~%d] = %d\n", edge, left, right, window); ++ ++ if (window > oldwindow) { ++ oldwindow = window; ++ center = (((right - 1) + left) / 2) | inverted; ++ } + } + +- div >>= 1; ++ val = (out_phase | enable_mask | (center << ASPEED_SDC_S0_PHASE_IN_SHIFT)); ++ writel(val, sdc->regs + ASPEED_SDC_PHASE); + +- clk = div << SDHCI_DIVIDER_SHIFT; ++ pr_debug("input tuning result=%x\n", val); + +- aspeed_sdhci_configure_phase(host, bus); ++ inverted = 0; ++ out_phase = val & ~ASPEED_SDC_S0_PHASE_OUT; ++ in_phase = out_phase; ++ oldwindow = 0; + +- sdhci_enable_clk(host, clk); +-} ++ for (edge = 0; edge < 2; edge++) { ++ if (edge == 1) ++ inverted = ASPEED_SDHCI_TAP_PARAM_INVERT_CLK; + +-static unsigned int aspeed_sdhci_get_max_clock(struct sdhci_host *host) +-{ +- if (host->mmc->f_max) +- return host->mmc->f_max; ++ val = (in_phase | enable_mask | (inverted << ASPEED_SDC_S0_PHASE_OUT_SHIFT)); + +- return sdhci_pltfm_clk_get_max_clock(host); +-} ++ /* find the left boundary */ ++ for (left = 0; left < ASPEED_SDHCI_NR_TAPS + 1; left++) { ++ out_phase = val | (left << ASPEED_SDC_S0_PHASE_OUT_SHIFT); ++ writel(out_phase, sdc->regs + ASPEED_SDC_PHASE); + +-static void aspeed_sdhci_set_bus_width(struct sdhci_host *host, int width) +-{ +- struct sdhci_pltfm_host *pltfm_priv; +- struct aspeed_sdhci *aspeed_sdhci; +- struct aspeed_sdc *aspeed_sdc; +- u8 ctrl; ++ if (!mmc_send_tuning(host->mmc, opcode, NULL)) ++ break; ++ } + +- pltfm_priv = sdhci_priv(host); +- aspeed_sdhci = sdhci_pltfm_priv(pltfm_priv); +- aspeed_sdc = aspeed_sdhci->parent; ++ /* find the right boundary */ ++ for (right = left + 1; right < ASPEED_SDHCI_NR_TAPS + 1; right++) { ++ out_phase = val | (right << ASPEED_SDC_S0_PHASE_OUT_SHIFT); ++ writel(out_phase, sdc->regs + ASPEED_SDC_PHASE); + +- /* Set/clear 8-bit mode */ +- aspeed_sdc_configure_8bit_mode(aspeed_sdc, aspeed_sdhci, +- width == MMC_BUS_WIDTH_8); ++ if (mmc_send_tuning(host->mmc, opcode, NULL)) ++ break; ++ } + +- /* 
Set/clear 1 or 4 bit mode */ +- ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); +- if (width == MMC_BUS_WIDTH_4) +- ctrl |= SDHCI_CTRL_4BITBUS; +- else +- ctrl &= ~SDHCI_CTRL_4BITBUS; +- sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); +-} ++ window = right - left; ++ pr_debug("tuning window[%d][%d~%d] = %d\n", edge, left, right, window); + +-static u32 aspeed_sdhci_readl(struct sdhci_host *host, int reg) +-{ +- u32 val = readl(host->ioaddr + reg); ++ if (window > oldwindow) { ++ oldwindow = window; ++ center = (((right - 1) + left) / 2) | inverted; ++ } ++ } + +- if (unlikely(reg == SDHCI_PRESENT_STATE) && +- (host->mmc->caps2 & MMC_CAP2_CD_ACTIVE_HIGH)) +- val ^= SDHCI_CARD_PRESENT; ++ val = (in_phase | enable_mask | (center << ASPEED_SDC_S0_PHASE_OUT_SHIFT)); ++ writel(val, sdc->regs + ASPEED_SDC_PHASE); + +- return val; ++ pr_debug("output tuning result=%x\n", val); ++ ++ return mmc_send_tuning(host->mmc, opcode, NULL); ++} ++ ++static void aspeed_sdhci_voltage_switch(struct sdhci_host *host) ++{ ++ mdelay(30); + } + + static const struct sdhci_ops aspeed_sdhci_ops = { + .read_l = aspeed_sdhci_readl, +- .set_clock = aspeed_sdhci_set_clock, +- .get_max_clock = aspeed_sdhci_get_max_clock, ++ .set_clock = sdhci_set_clock, ++ .get_max_clock = sdhci_pltfm_clk_get_max_clock, + .set_bus_width = aspeed_sdhci_set_bus_width, + .get_timeout_clock = sdhci_pltfm_clk_get_max_clock, +- .reset = sdhci_reset, ++ .voltage_switch = aspeed_sdhci_voltage_switch, ++ .reset = aspeed_sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, ++ .platform_execute_tuning = aspeed_sdhci_execute_tuning, + }; + + static const struct sdhci_pltfm_data aspeed_sdhci_pdata = { + .ops = &aspeed_sdhci_ops, + .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, ++ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, + }; + + static inline int aspeed_sdhci_calculate_slot(struct aspeed_sdhci *dev, +@@ -372,12 +362,6 @@ + int slot; + int ret; + +- aspeed_pdata = of_device_get_match_data(&pdev->dev); +- if (!aspeed_pdata) { +- dev_err(&pdev->dev, "Missing platform configuration data\n"); +- return -EINVAL; +- } +- + host = sdhci_pltfm_init(pdev, &aspeed_sdhci_pdata, sizeof(*dev)); + if (IS_ERR(host)) + return PTR_ERR(host); +@@ -395,14 +379,6 @@ + else if (slot >= 2) + return -EINVAL; + +- if (slot < dev->pdata->nr_phase_descs) { +- dev->phase_desc = &dev->pdata->phase_desc[slot]; +- } else { +- dev_info(&pdev->dev, +- "Phase control not supported for slot %d\n", slot); +- dev->phase_desc = NULL; +- } +- + dev->width_mask = !slot ? 
ASPEED_SDC_S0_MMC8 : ASPEED_SDC_S1_MMC8; + + dev_info(&pdev->dev, "Configured for slot %d\n", slot); +@@ -413,11 +389,13 @@ + of_property_read_bool(np, "sd-uhs-sdr104")) { + aspeed_sdc_set_slot_capability(host, dev->parent, ASPEED_SDC_CAP1_1_8V, + true, slot); +- } +- +- if (of_property_read_bool(np, "sd-uhs-sdr104")) { + aspeed_sdc_set_slot_capability(host, dev->parent, ASPEED_SDC_CAP2_SDR104, + true, slot); ++ } else { ++ aspeed_sdc_set_slot_capability(host, dev->parent, ASPEED_SDC_CAP1_1_8V, ++ 0, slot); ++ aspeed_sdc_set_slot_capability(host, dev->parent, ASPEED_SDC_CAP2_SDR104, ++ 0, slot); + } + + pltfm_host->clk = devm_clk_get(&pdev->dev, NULL); +@@ -434,9 +412,6 @@ + if (ret) + goto err_sdhci_add; + +- if (dev->phase_desc) +- mmc_of_parse_clk_phase(&pdev->dev, &dev->phase_map); +- + ret = sdhci_add_host(host); + if (ret) + goto err_sdhci_add; +@@ -469,45 +444,11 @@ + .clk_div_start = 2, + }; + +-static const struct aspeed_sdhci_phase_desc ast2600_sdhci_phase[] = { +- /* SDHCI/Slot 0 */ +- [0] = { +- .in = { +- .tap_mask = ASPEED_SDC_S0_PHASE_IN, +- .enable_mask = ASPEED_SDC_S0_PHASE_IN_EN, +- .enable_value = 1, +- }, +- .out = { +- .tap_mask = ASPEED_SDC_S0_PHASE_OUT, +- .enable_mask = ASPEED_SDC_S0_PHASE_OUT_EN, +- .enable_value = 3, +- }, +- }, +- /* SDHCI/Slot 1 */ +- [1] = { +- .in = { +- .tap_mask = ASPEED_SDC_S1_PHASE_IN, +- .enable_mask = ASPEED_SDC_S1_PHASE_IN_EN, +- .enable_value = 1, +- }, +- .out = { +- .tap_mask = ASPEED_SDC_S1_PHASE_OUT, +- .enable_mask = ASPEED_SDC_S1_PHASE_OUT_EN, +- .enable_value = 3, +- }, +- }, +-}; +- +-static const struct aspeed_sdhci_pdata ast2600_sdhci_pdata = { +- .clk_div_start = 1, +- .phase_desc = ast2600_sdhci_phase, +- .nr_phase_descs = ARRAY_SIZE(ast2600_sdhci_phase), +-}; +- + static const struct of_device_id aspeed_sdhci_of_match[] = { + { .compatible = "aspeed,ast2400-sdhci", .data = &ast2400_sdhci_pdata, }, +- { .compatible = "aspeed,ast2500-sdhci", .data = &ast2400_sdhci_pdata, }, +- { .compatible = "aspeed,ast2600-sdhci", .data = &ast2600_sdhci_pdata, }, ++ { .compatible = "aspeed,ast2500-sdhci", }, ++ { .compatible = "aspeed,ast2600-sdhci", }, ++ { .compatible = "aspeed,ast2600-emmc", }, + { } + }; + MODULE_DEVICE_TABLE(of, aspeed_sdhci_of_match); +@@ -522,11 +463,22 @@ + .remove_new = aspeed_sdhci_remove, + }; + ++static const struct of_device_id aspeed_sdc_of_match[] = { ++ { .compatible = "aspeed,ast2400-sd-controller", }, ++ { .compatible = "aspeed,ast2500-sd-controller", }, ++ { .compatible = "aspeed,ast2600-sd-controller", .data = &ast2600_sdc_info}, ++ { } ++}; ++ ++MODULE_DEVICE_TABLE(of, aspeed_sdc_of_match); ++ + static int aspeed_sdc_probe(struct platform_device *pdev) + + { + struct device_node *parent, *child; + struct aspeed_sdc *sdc; ++ const struct of_device_id *match = NULL; ++ const struct aspeed_sdc_info *info = NULL; + int ret; + + sdc = devm_kzalloc(&pdev->dev, sizeof(*sdc), GFP_KERNEL); +@@ -535,6 +487,23 @@ + + spin_lock_init(&sdc->lock); + ++ match = of_match_device(aspeed_sdc_of_match, &pdev->dev); ++ if (!match) ++ return -ENODEV; ++ ++ if (match->data) ++ info = match->data; ++ ++ if (info) { ++ if (info->flag & PROBE_AFTER_ASSET_DEASSERT) { ++ sdc->rst = devm_reset_control_get(&pdev->dev, NULL); ++ if (!IS_ERR(sdc->rst)) { ++ reset_control_assert(sdc->rst); ++ reset_control_deassert(sdc->rst); ++ } ++ } ++ } ++ + sdc->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(sdc->clk)) + return PTR_ERR(sdc->clk); +@@ -579,15 +548,6 @@ + clk_disable_unprepare(sdc->clk); + } + +-static const struct of_device_id 
aspeed_sdc_of_match[] = { +- { .compatible = "aspeed,ast2400-sd-controller", }, +- { .compatible = "aspeed,ast2500-sd-controller", }, +- { .compatible = "aspeed,ast2600-sd-controller", }, +- { } +-}; +- +-MODULE_DEVICE_TABLE(of, aspeed_sdc_of_match); +- + static struct platform_driver aspeed_sdc_driver = { + .driver = { + .name = "sd-controller-aspeed", +diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig +--- a/drivers/net/can/Kconfig 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/net/can/Kconfig 2025-12-23 10:16:08.679241296 +0000 +@@ -66,6 +66,12 @@ + + if CAN_NETLINK + ++config CAN_ASPEED ++ tristate "ASPEED CAN" ++ depends on ARM64 || COMPILE_TEST ++ help ++ ASPEED CAN driver. ++ + config CAN_CALC_BITTIMING + bool "CAN bit-timing calculation" + default y +diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile +--- a/drivers/net/can/Makefile 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/net/can/Makefile 2025-12-23 10:16:12.642174838 +0000 +@@ -15,6 +15,7 @@ + obj-y += usb/ + obj-y += softing/ + ++obj-$(CONFIG_CAN_ASPEED) += aspeed_can.o + obj-$(CONFIG_CAN_AT91) += at91_can.o + obj-$(CONFIG_CAN_BXCAN) += bxcan.o + obj-$(CONFIG_CAN_CAN327) += can327.o +diff --git a/drivers/net/can/aspeed_can.c b/drivers/net/can/aspeed_can.c +--- a/drivers/net/can/aspeed_can.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/net/can/aspeed_can.c 2025-12-23 10:16:20.944035686 +0000 +@@ -0,0 +1,1720 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++/* ASPEED CAN device driver ++ * ++ * Copyright (C) 2023 ASPEED Inc. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define DRIVER_NAME "aspeed_can" ++ ++#define CAN_AC_SEG (0x0004) /* classic can seg */ ++#define CAN_FD_SEG (0x0008) /* can fd seg */ ++#define CAN_BITITME (0x0010) /* prescaler */ ++#define CAN_INTF (0x0014) /* can interrupt flag */ ++#define CAN_INTE (0x0018) /* can interrupt enabled */ ++#define CAN_TSTAT (0x001c) /* can transmit status */ ++ ++#define CAN_CTRL (0x0028) ++#define CAN_ERR_STAT (0x002c) /* err ctrl and rx/tx err status */ ++ ++#define CAN_RBUF (0x0070) /* receive buffer registers 0x070-0x88b(*/ ++#define CAN_TBUF (0x0890) /* transmit buffer registers 0x890-0x10ab */ ++ ++#define CAN_RBUF_ID (CAN_RBUF + 0x0000) ++#define CAN_RBUF_CTL (CAN_RBUF + 0x0004) ++#define CAN_RBUF_TYPE (CAN_RBUF + 0x0008) ++#define CAN_RBUF_ACF (CAN_RBUF + 0x000C) ++#define CAN_RBUF_DATA (CAN_RBUF + 0x0010) ++ ++#define CAN_TBUF_ID (CAN_TBUF + 0x0000) ++#define CAN_TBUF_CTL (CAN_TBUF + 0x0004) ++#define CAN_TBUF_TYPE (CAN_TBUF + 0x0008) ++#define CAN_TBUF_ACF (CAN_TBUF + 0x000C) ++#define CAN_TBUF_DATA (CAN_TBUF + 0x0010) ++ ++#define CAN_MODE_CONFIG (0x1100) ++ ++#define CAN_TBUF_MIRROR (0x1190) ++#define CAN_TBUF_READ_ID (CAN_TBUF_MIRROR + 0x0000) ++#define CAN_TBUF_READ_CTL (CAN_TBUF_MIRROR + 0x0004) ++#define CAN_TBUF_READ_TYPE (CAN_TBUF_MIRROR + 0x0008) ++#define CAN_TBUF_READ_ACF (CAN_TBUF_MIRROR + 0x000C) ++#define CAN_TBUF_READ_DATA (CAN_TBUF_MIRROR + 0x0010) ++ ++/* CAN_AC_SEG(0x0004) bit description */ ++#define CAN_TIMING_AC_SEG1_MASK GENMASK(8, 0) ++#define CAN_TIMING_AC_SEG2_MASK GENMASK(22, 16) ++#define CAN_TIMING_AC_SJW_MASK GENMASK(30, 24) ++ ++#define CAN_TIMING_AC_SEG1_BITOFF 0 ++#define CAN_TIMING_AC_SEG2_BITOFF 16 ++#define CAN_TIMING_AC_SJW_BITOFF 24 ++ ++/* CAN_FD_SEG(0x0008) bit description */ ++#define 
CAN_TIMING_FD_SEG1_MASK GENMASK(7, 0) ++#define CAN_TIMING_FD_SEG2_MASK GENMASK(22, 16) ++#define CAN_TIMING_FD_SJW_MASK GENMASK(30, 24) ++ ++#define CAN_TIMING_FD_SEG1_BITOFF 0 ++#define CAN_TIMING_FD_SEG2_BITOFF 16 ++#define CAN_TIMING_FD_SJW_BITOFF 24 ++ ++/* CAN_BITTIME(0x0010) bit description */ ++#define CAN_TIMING_PRESC_MASK GENMASK(4, 0) ++#define CAN_TIMING_PRESC_BITOFF 0 ++#define CAN_TIMING_FD_SSPOFF_MASK GENMASK(15, 8) ++#define CAN_TIMING_FD_SSPOFF_BITOFF 8 ++ ++/* CAN_INTF(0x0014) bit description */ ++#define CAN_INT_EIF_BIT BIT(1) /* error interrupt flag */ ++#define CAN_INT_TSIF_BIT BIT(2) /* transmission secondary interrupt flag */ ++#define CAN_INT_TPIF_BIT BIT(3) /* transmission primary interrupt flag */ ++#define CAN_INT_RAFIF_BIT BIT(4) /* RB almost full interrupt flag */ ++#define CAN_INT_RFIF_BIT BIT(5) /* RB full interrupt flag */ ++#define CAN_INT_ROIF_BIT BIT(6) /* RB overflow interrupt flag */ ++#define CAN_INT_RIF_BIT BIT(7) /* receive interrupt flag */ ++#define CAN_INT_BEIF_BIT BIT(8) /* bus error interrupt flag */ ++#define CAN_INT_ALIF_BIT BIT(9) /* arbitration loss interrupt flag */ ++#define CAN_INT_EPIF_BIT BIT(10) /* error passive interrupt flag */ ++#define CAN_EPASS_BIT BIT(30) /* check if device is error passive */ ++#define CAN_INT_EWARN_BIT BIT(31) /* error Warning limit reached */ ++ ++/* CAN_INTE(0x0018) bit description */ ++#define CAN_INT_EIE_BIT BIT(1) /* error interrupt enable */ ++#define CAN_INT_TSIE_BIT BIT(2) /* transmission secondary interrupt enable */ ++#define CAN_INT_TPIE_BIT BIT(3) /* transmission secondary interrupt enable */ ++#define CAN_INT_RAFIE_BIT BIT(4) /* RB almost full interrupt enable */ ++#define CAN_INT_RFIE_BIT BIT(5) /* RB full interrupt enable */ ++#define CAN_INT_ROIE_BIT BIT(6) /* RB overflow interrupt enable */ ++#define CAN_INT_RIE_BIT BIT(7) /* receive interrupt enable */ ++#define CAN_INT_BEIE_BIT BIT(8) /* bus error interrupt enable */ ++#define CAN_INT_ALIE_BIT BIT(9) /* arbitration loss interrupt enable */ ++#define CAN_INT_EPIE_BIT BIT(10) /* error passive interrupt enable */ ++ ++/* CAN_TSTAT(0x001C) bit description */ ++#define CAN_TSTAT1_MASK GENMASK(10, 8) ++#define CAN_TSTAT1_BITOFF 8 ++#define CAN_TSTAT2_HANDLE_MASK GENMASK(23, 16) ++#define CAN_TSTAT2_HANDLE_BITOFF 16 ++#define CAN_TSTAT2_MASK GENMASK(26, 24) ++#define CAN_TSTAT2_BITOFF 24 ++ ++/* CAN_CTRL(0x0028) bit description */ ++#define CAN_CTRL_BUSOFF_BIT BIT(0) ++#define CAN_CTRL_LBMIMOD_BIT BIT(5) /* set loop back mode, internal */ ++#define CAN_CTRL_LBMEMOD_BIT BIT(6) /* set loop back mode, external */ ++#define CAN_CTRL_RST_BIT BIT(7) /* set reset bit */ ++#define CAN_CTRL_TSALL_BIT BIT(9) /* transmit secondary all frame */ ++#define CAN_CTRL_TSONE_BIT BIT(10) /* transmit secondary one frame */ ++#define CAN_CTRL_TPE_BIT BIT(12) /* transmit primary enable */ ++#define CAN_CTRL_STBY_BIT BIT(13) /* transceiver standby */ ++#define CAN_CTRL_TBSEL_BIT BIT(15) /* transmit buffer select */ ++#define CAN_CTRL_TSSTAT_MASK GENMASK(17, 16) /* Transmission secondary status bits */ ++#define CAN_CTRL_TSFF_BIT BIT(18) /* transmit secondary buffer full flag */ ++#define CAN_CTRL_TTBM_BIT BIT(20) /* set TTTBM as 1->full TTCAN mode */ ++#define CAN_CTRL_TSMODE_BIT BIT(21) /* set TSMODE as 1->FIFO mode */ ++#define CAN_CTRL_TSNEXT_BIT BIT(22) /* transmit buffer secondary NEXT */ ++#define CAN_CTRL_RSTAT_NOT_EMPTY_MASKT GENMASK(25, 24) ++#define CAN_CTRL_RREL_BIT BIT(28) /* receive buffer release */ ++#define CAN_CTRL_SACK_BIT BIT(31) /* self-ack mode */ ++ 
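++/*
++ * Note on the transmit path, summarizing the control bits above: the
++ * controller provides a primary transmit buffer (PTB, kicked via TPE) and a
++ * secondary transmit buffer (STB, selected via TBSEL). The STB can be flushed
++ * one frame at a time (TSONE) or all at once (TSALL), in FIFO or priority
++ * order (TSMODE), and TSSTAT/TSFF report its status.
++ */
++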
++#define STB_IS_EMPTY 0x0
++
++/* CAN_ERR(0x002c) bit description */
++#define CAN_ERR_EWL_MASK GENMASK(3, 0) /* programmable error warning limit */
++#define CAN_ERR_EWL_BITOFF 0
++#define CAN_ERR_AFWL_MASK GENMASK(7, 4) /* receive buffer almost full warning limit */
++#define CAN_ERR_AFWL_BITOFF 4
++#define CAN_ERR_ALC_MASK GENMASK(12, 8) /* arbitration lost capture */
++#define CAN_ERR_ALC_BITOFF 8
++#define CAN_ERR_KOER_MASK GENMASK(15, 13) /* kind of error */
++#define CAN_ERR_KOER_BITOFF 13
++#define CAN_ERR_RECNT_MASK GENMASK(23, 16) /* receive error count */
++#define CAN_ERR_RECNT_BITOFF 16
++#define CAN_ERR_TECNT_MASK GENMASK(31, 24) /* transmit error count */
++#define CAN_ERR_TECNT_BITOFF 24
++
++/* CAN BUF bit description */
++#define CAN_BUF_ID_BFF_MASK GENMASK(28, 18) /* frame identifier */
++#define CAN_BUF_ID_BFF_BITOFF 18
++#define CAN_BUF_ID_EFF_MASK GENMASK(28, 0) /* identifier extension */
++#define CAN_BUF_ID_EFF_BITOFF 0
++#define CAN_BUF_DLC_MASK GENMASK(10, 0) /* data length code */
++#define CAN_BUF_IDE_BIT BIT(16) /* identifier extension */
++#define CAN_BUF_FDF_BIT BIT(17) /* CAN FD frame format */
++#define CAN_BUF_BRS_BIT BIT(18) /* CAN FD bit rate switch enable */
++#define CAN_BUF_RMF_BIT BIT(20) /* remote frame */
++#define CAN_BUF_HANDLE_BITOFF 24
++
++#define KOER_BIT_ERROR_MASK (BIT(0))
++#define KOER_FORM_ERROR_MASK (BIT(1))
++#define KOER_STUFF_ERROR_MASK (BIT(1) | BIT(0))
++#define KOER_ACK_ERROR_MASK (BIT(2))
++#define KOER_CRC_ERROR_MASK (BIT(2) | BIT(0))
++#define KOER_OTH_ERROR_MASK (BIT(2) | BIT(1))
++
++#define STAT_AFWL 0x04
++#define STAT_EWL 0x0b
++
++#define STB_IDX_RING_SZ 3
++
++#define PTB_MODE 0x01
++#define STB_MODE 0x02
++
++#define STB_TX_MODE_ALL 0x01 /* secondary tx buffer mode */
++#define STB_TX_MODE_ONE 0x02 /* secondary tx buffer mode */
++#define STB_POLICY_FIFO 0x10 /* secondary tx buffer fifo mode */
++#define STB_POLICY_PRIO 0x20 /* secondary tx buffer priority mode */
++
++#define STB_INVALID_HANDLE_VAL 0xffffffff
++#define STB_INVALID_SKB_IDX 0xffffffff
++
++/* SW flag */
++#define ASPEED_CAN_INTERNEL_LOOPBACK 0x00000001
++
++struct can_stb_ring_obj {
++ u32 idx;
++ u32 skb_idx;
++ u32 handle;
++ struct can_stb_ring_obj *next;
++};
++
++struct aspeed_can_priv {
++ /* "struct can_priv can" must be kept as the first member
++ * of this struct.
++ */ ++ struct can_priv can; ++ void __iomem *reg_base; ++ struct device *dev; ++ /* Lock for synchronizing TX interrupt handling */ ++ spinlock_t tx_lock; ++ struct can_stb_ring_obj *stb_ring; ++ struct can_stb_ring_obj *head_ptr; ++ struct can_stb_ring_obj *tail_ptr; ++ u32 tx_max; ++ struct napi_struct napi; ++ struct clk *clk; ++ struct reset_control *reset; ++ u32 tb_mode; ++ u32 stb_mode_policy; ++ u32 frame_handle; ++ u32 flag; ++}; ++ ++static const struct can_bittiming_const aspeed_can_bittiming_const = { ++ .name = DRIVER_NAME, ++ .tseg1_min = 2, ++ .tseg1_max = 513, ++ .tseg2_min = 1, ++ .tseg2_max = 128, ++ .sjw_max = 128, ++ .brp_min = 1, ++ .brp_max = 128, ++ .brp_inc = 1, ++}; ++ ++static const struct can_bittiming_const aspeed_canfd_bittiming_const = { ++ .name = DRIVER_NAME, ++ .tseg1_min = 2, ++ .tseg1_max = 257, ++ .tseg2_min = 1, ++ .tseg2_max = 128, ++ .sjw_max = 128, ++ .brp_min = 1, ++ .brp_max = 128, ++ .brp_inc = 1, ++}; ++ ++inline void aspeed_can_set_bit(struct aspeed_can_priv *priv, ++ u32 reg_off, u32 bit) ++{ ++ u32 reg_val; ++ ++ reg_val = readl(priv->reg_base + reg_off); ++ reg_val |= bit; ++ writel(reg_val, priv->reg_base + reg_off); ++} ++ ++inline void aspeed_can_clr_bit(struct aspeed_can_priv *priv, ++ u32 reg_off, u32 bit) ++{ ++ u32 reg_val; ++ ++ reg_val = readl(priv->reg_base + reg_off); ++ reg_val &= ~(bit); ++ writel(reg_val, priv->reg_base + reg_off); ++} ++ ++inline void aspeed_can_clr_irq_bits(struct aspeed_can_priv *priv, ++ u32 reg_off, u32 bits) ++{ ++ writel(bits, priv->reg_base + reg_off); ++} ++ ++static bool aspeed_can_check_reset_mode(struct net_device *ndev) ++{ ++ struct aspeed_can_priv *priv = netdev_priv(ndev); ++ ++ if (!(readl(priv->reg_base + CAN_CTRL) & CAN_CTRL_RST_BIT)) ++ return false; ++ ++ return true; ++} ++ ++static void aspeed_can_stb_ring_obj_init(struct net_device *ndev) ++{ ++ struct aspeed_can_priv *priv = netdev_priv(ndev); ++ int i; ++ ++ for (i = 0; i < STB_IDX_RING_SZ; i++) { ++ priv->stb_ring[i].idx = i + 1; ++ priv->stb_ring[i].skb_idx = i + 1; ++ priv->stb_ring[i].handle = STB_INVALID_HANDLE_VAL; ++ ++ if (i == STB_IDX_RING_SZ - 1) { ++ priv->stb_ring[i].next = &priv->stb_ring[0]; ++ } else { ++ priv->stb_ring[i].next = ++ &priv->stb_ring[i + 1]; ++ } ++ } ++ ++ priv->head_ptr = &priv->stb_ring[0]; ++ priv->tail_ptr = &priv->stb_ring[0]; ++} ++ ++static u32 aspeed_can_get_skb_idx(struct net_device *ndev, ++ u32 handle) ++{ ++ struct aspeed_can_priv *priv = netdev_priv(ndev); ++ struct can_stb_ring_obj *tmp_ptr = priv->tail_ptr; ++ ++ if (tmp_ptr->handle == handle) ++ return tmp_ptr->skb_idx; ++ ++ tmp_ptr = priv->tail_ptr->next; ++ ++ while (tmp_ptr != priv->head_ptr) { ++ if (tmp_ptr->handle == handle) ++ return tmp_ptr->skb_idx; ++ tmp_ptr = tmp_ptr->next; ++ } ++ ++ return STB_INVALID_SKB_IDX; ++} ++ ++static int aspeed_can_drop_ring_obj(struct net_device *ndev, ++ u32 handle) ++{ ++ struct aspeed_can_priv *priv = netdev_priv(ndev); ++ struct can_stb_ring_obj *tmp_ptr = priv->tail_ptr; ++ u32 tmp_skb_idx; ++ ++ if (tmp_ptr->handle == handle) { ++ /* keep original skb idx */ ++ tmp_ptr->handle = STB_INVALID_HANDLE_VAL; ++ priv->tail_ptr = priv->tail_ptr->next; ++ return 0; ++ } ++ ++ tmp_ptr = priv->tail_ptr->next; ++ ++ while (tmp_ptr != priv->head_ptr) { ++ if (tmp_ptr->handle == handle) { ++ tmp_skb_idx = tmp_ptr->skb_idx; ++ tmp_ptr->handle = priv->tail_ptr->handle; ++ tmp_ptr->skb_idx = priv->tail_ptr->skb_idx; ++ ++ priv->tail_ptr->skb_idx = tmp_skb_idx; ++ priv->tail_ptr->handle = 
STB_INVALID_HANDLE_VAL; ++ priv->tail_ptr = priv->tail_ptr->next; ++ return 0; ++ } ++ ++ tmp_ptr = tmp_ptr->next; ++ } ++ ++ return -1; ++} ++ ++static u32 aspeed_can_get_frame_num(struct net_device *ndev) ++{ ++ struct aspeed_can_priv *priv = netdev_priv(ndev); ++ struct can_stb_ring_obj *tmp_ptr = priv->tail_ptr; ++ u32 len = 0; ++ ++ if (tmp_ptr == priv->head_ptr) ++ return STB_IDX_RING_SZ; ++ ++ while (tmp_ptr != priv->head_ptr) { ++ len++; ++ tmp_ptr = tmp_ptr->next; ++ } ++ ++ return len; ++} ++ ++static int aspeed_can_set_reset_mode(struct net_device *ndev) ++{ ++ struct aspeed_can_priv *priv = netdev_priv(ndev); ++ unsigned long timeout; ++ ++ aspeed_can_set_bit(priv, CAN_CTRL, CAN_CTRL_RST_BIT); ++ ++ timeout = jiffies + (1 * HZ); ++ while (!aspeed_can_check_reset_mode(ndev)) { ++ if (time_after(jiffies, timeout)) { ++ netdev_warn(ndev, "timed out for config mode\n"); ++ return -ETIMEDOUT; ++ } ++ ++ usleep_range(500, 10000); ++ } ++ ++ aspeed_can_stb_ring_obj_init(ndev); ++ ++ return 0; ++} ++ ++static int aspeed_can_exit_reset_mode(struct net_device *ndev) ++{ ++ struct aspeed_can_priv *priv = netdev_priv(ndev); ++ unsigned long timeout; ++ ++ aspeed_can_clr_bit(priv, CAN_CTRL, CAN_CTRL_RST_BIT); ++ ++ timeout = jiffies + (1 * HZ); ++ while (aspeed_can_check_reset_mode(ndev)) { ++ if (time_after(jiffies, timeout)) { ++ netdev_warn(ndev, "timed out for config mode\n"); ++ return -ETIMEDOUT; ++ } ++ ++ usleep_range(500, 10000); ++ } ++ ++ return 0; ++} ++ ++static void aspeed_can_err_init(struct net_device *ndev) ++{ ++ struct aspeed_can_priv *priv = netdev_priv(ndev); ++ u32 reg_val; ++ ++ reg_val = (STAT_AFWL << CAN_ERR_AFWL_BITOFF) & CAN_ERR_AFWL_MASK; ++ reg_val |= (STAT_EWL << CAN_ERR_EWL_BITOFF) & CAN_ERR_EWL_MASK; ++ ++ writel(reg_val, priv->reg_base + CAN_ERR_STAT); ++} ++ ++static void aspeed_can_interrupt_conf(struct net_device *ndev) ++{ ++ struct aspeed_can_priv *priv = netdev_priv(ndev); ++ u32 inte; ++ ++ inte = CAN_INT_EIE_BIT | CAN_INT_TSIE_BIT | CAN_INT_TPIE_BIT | ++ CAN_INT_RAFIE_BIT | CAN_INT_RFIE_BIT | CAN_INT_ROIE_BIT | ++ CAN_INT_RIE_BIT | CAN_INT_BEIE_BIT | CAN_INT_ALIE_BIT | ++ CAN_INT_EPIE_BIT; ++ ++ writel(inte, priv->reg_base + CAN_INTE); ++} ++ ++static void aspeed_can_tb_mode_conf(struct net_device *ndev) ++{ ++ struct aspeed_can_priv *priv = netdev_priv(ndev); ++ ++ if (priv->tb_mode == STB_MODE) ++ aspeed_can_set_bit(priv, CAN_CTRL, CAN_CTRL_TBSEL_BIT); ++ else ++ aspeed_can_clr_bit(priv, CAN_CTRL, CAN_CTRL_TBSEL_BIT); ++} ++ ++static void aspeed_can_sack_conf(struct net_device *ndev, bool enable) ++{ ++ struct aspeed_can_priv *priv = netdev_priv(ndev); ++ ++ if (enable) ++ aspeed_can_set_bit(priv, CAN_CTRL, CAN_CTRL_SACK_BIT); ++ else ++ aspeed_can_clr_bit(priv, CAN_CTRL, CAN_CTRL_SACK_BIT); ++} ++ ++static void aspeed_can_loopback_ext_conf(struct net_device *ndev, bool enable) ++{ ++ struct aspeed_can_priv *priv = netdev_priv(ndev); ++ ++ if (enable) ++ aspeed_can_set_bit(priv, CAN_CTRL, CAN_CTRL_LBMEMOD_BIT); ++ else ++ aspeed_can_clr_bit(priv, CAN_CTRL, CAN_CTRL_LBMEMOD_BIT); ++} ++ ++static void aspeed_can_loopback_int_conf(struct net_device *ndev, bool enable) ++{ ++ struct aspeed_can_priv *priv = netdev_priv(ndev); ++ ++ if (enable) ++ aspeed_can_set_bit(priv, CAN_CTRL, CAN_CTRL_LBMIMOD_BIT); ++ else ++ aspeed_can_clr_bit(priv, CAN_CTRL, CAN_CTRL_LBMIMOD_BIT); ++} ++ ++static void aspeed_can_fd_init(struct net_device *ndev) ++{ ++ struct aspeed_can_priv *priv = netdev_priv(ndev); ++ ++ aspeed_can_set_bit(priv, CAN_MODE_CONFIG, BIT(0)); ++} ++ 
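++/* Dump the bit-timing, interrupt, transmit status, control and error
++ * registers to the kernel log for debugging.
++ */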
++static void aspeed_can_reg_dump(struct net_device *ndev) ++{ ++ struct aspeed_can_priv *priv = netdev_priv(ndev); ++ u32 reg_val; ++ u32 i; ++ ++ reg_val = readl(priv->reg_base + CAN_AC_SEG); ++ netdev_info(ndev, "(REG004) CAN_AC_SEG = 0x%08x\n", reg_val); ++ netdev_info(ndev, " ac_seg1 (8:0): 0x%02x\n", ++ (u32)((reg_val & CAN_TIMING_AC_SEG1_MASK) >> ++ CAN_TIMING_AC_SEG1_BITOFF)); ++ netdev_info(ndev, " ac_seg2(22:16): 0x%02x\n", ++ (u32)((reg_val & CAN_TIMING_AC_SEG2_MASK) >> ++ CAN_TIMING_AC_SEG2_BITOFF)); ++ netdev_info(ndev, " ac_sjw (30:24): 0x%02x\n", ++ (u32)((reg_val & CAN_TIMING_AC_SJW_MASK) >> ++ CAN_TIMING_AC_SJW_BITOFF)); ++ ++ reg_val = readl(priv->reg_base + CAN_FD_SEG); ++ netdev_info(ndev, "(REG008) CAN_FD_SEG = 0x%08x\n", reg_val); ++ netdev_info(ndev, " fd_seg1 (7:0): 0x%02x\n", ++ (u32)((reg_val & CAN_TIMING_FD_SEG1_MASK) >> ++ CAN_TIMING_FD_SEG1_BITOFF)); ++ netdev_info(ndev, " fd_seg2(22:16): 0x%02x\n", ++ (u32)((reg_val & CAN_TIMING_FD_SEG2_MASK) >> ++ CAN_TIMING_FD_SEG2_BITOFF)); ++ netdev_info(ndev, " fd_sjw (30:24): 0x%02x\n", ++ (u32)((reg_val & CAN_TIMING_FD_SJW_MASK) >> ++ CAN_TIMING_FD_SJW_BITOFF)); ++ ++ reg_val = readl(priv->reg_base + CAN_BITITME); ++ netdev_info(ndev, "(REG010) CAN_BITITME = 0x%08x\n", reg_val); ++ netdev_info(ndev, " prescaler (4:0): 0x%02x\n", ++ (u32)((reg_val & CAN_TIMING_PRESC_MASK) >> ++ CAN_TIMING_PRESC_BITOFF)); ++ netdev_info(ndev, " fd_sspoff(15:8): 0x%02x\n", ++ (u32)((reg_val & CAN_TIMING_FD_SSPOFF_MASK) >> ++ CAN_TIMING_FD_SSPOFF_BITOFF)); ++ ++ reg_val = readl(priv->reg_base + CAN_INTF); ++ netdev_info(ndev, "(REG014) CAN_INTF = 0x%08x\n", reg_val); ++ netdev_info(ndev, " EIF (1): %d\n", (reg_val & CAN_INT_EIF_BIT) ? 1 : 0); ++ netdev_info(ndev, " TSIF (2): %d\n", (reg_val & CAN_INT_TSIF_BIT) ? 1 : 0); ++ netdev_info(ndev, " TPIF (3): %d\n", (reg_val & CAN_INT_TPIF_BIT) ? 1 : 0); ++ netdev_info(ndev, " RAFIF (4): %d\n", (reg_val & CAN_INT_RAFIF_BIT) ? 1 : 0); ++ netdev_info(ndev, " RFIF (5): %d\n", (reg_val & CAN_INT_RFIF_BIT) ? 1 : 0); ++ netdev_info(ndev, " ROIF (6): %d\n", (reg_val & CAN_INT_ROIF_BIT) ? 1 : 0); ++ netdev_info(ndev, " RIF (7): %d\n", (reg_val & CAN_INT_RIF_BIT) ? 1 : 0); ++ netdev_info(ndev, " BEIF (8): %d\n", (reg_val & CAN_INT_BEIF_BIT) ? 1 : 0); ++ netdev_info(ndev, " EPIF (10): %d\n", (reg_val & CAN_INT_EPIF_BIT) ? 1 : 0); ++ netdev_info(ndev, " EPASS(30): %d\n", (reg_val & CAN_EPASS_BIT) ? 1 : 0); ++ netdev_info(ndev, " EWARN(31): %d\n", (reg_val & CAN_INT_EWARN_BIT) ? 1 : 0); ++ ++ reg_val = readl(priv->reg_base + CAN_INTE); ++ netdev_info(ndev, "(REG018) CAN_INTE = 0x%08x\n", reg_val); ++ netdev_info(ndev, " EIE (1): %d\n", (reg_val & CAN_INT_EIE_BIT) ? 1 : 0); ++ netdev_info(ndev, " TSIE (2): %d\n", (reg_val & CAN_INT_TSIE_BIT) ? 1 : 0); ++ netdev_info(ndev, " TPIE (3): %d\n", (reg_val & CAN_INT_TPIE_BIT) ? 1 : 0); ++ netdev_info(ndev, " RAFIE (4): %d\n", (reg_val & CAN_INT_RAFIE_BIT) ? 1 : 0); ++ netdev_info(ndev, " RFIE (5): %d\n", (reg_val & CAN_INT_RFIE_BIT) ? 1 : 0); ++ netdev_info(ndev, " ROIE (6): %d\n", (reg_val & CAN_INT_ROIE_BIT) ? 1 : 0); ++ netdev_info(ndev, " RIE (7): %d\n", (reg_val & CAN_INT_RIE_BIT) ? 1 : 0); ++ netdev_info(ndev, " BEIE (8): %d\n", (reg_val & CAN_INT_BEIE_BIT) ? 1 : 0); ++ netdev_info(ndev, " EPIE (10): %d\n", (reg_val & CAN_INT_EPIE_BIT) ? 
1 : 0); ++ ++ reg_val = readl(priv->reg_base + CAN_TSTAT); ++ netdev_info(ndev, "(REG01C) CAN_TSTAT = 0x%08x\n", reg_val); ++ netdev_info(ndev, " TSTAT1 (10:8): 0x%02x\n", ++ (u32)((reg_val & CAN_TSTAT1_MASK) >> CAN_TSTAT1_BITOFF)); ++ netdev_info(ndev, " TSTAT2(26:24): 0x%02x\n", ++ (u32)((reg_val & CAN_TSTAT2_MASK) >> CAN_TSTAT2_BITOFF)); ++ netdev_info(ndev, " 000 : no tx\n"); ++ netdev_info(ndev, " 001 : on-going\n"); ++ netdev_info(ndev, " 010 : lost arbitration\n"); ++ netdev_info(ndev, " 011 : transmitted\n"); ++ netdev_info(ndev, " 100 : aborted\n"); ++ netdev_info(ndev, " 101 : disturbed\n"); ++ netdev_info(ndev, " 110 : reject\n"); ++ ++ reg_val = readl(priv->reg_base + CAN_CTRL); ++ netdev_info(ndev, "(REG028) CAN_CTRL = 0x%08x\n", reg_val); ++ netdev_info(ndev, " BUSOFF (0): %d\n", (reg_val & CAN_CTRL_BUSOFF_BIT) ? 1 : 0); ++ netdev_info(ndev, " LBMIMOD (5): %d\n", (reg_val & CAN_CTRL_LBMIMOD_BIT) ? 1 : 0); ++ netdev_info(ndev, " LBMEMOD (6): %d\n", (reg_val & CAN_CTRL_LBMEMOD_BIT) ? 1 : 0); ++ netdev_info(ndev, " RST (7): %d\n", (reg_val & CAN_CTRL_RST_BIT) ? 1 : 0); ++ netdev_info(ndev, " TPE (12): %d\n", (reg_val & CAN_CTRL_TPE_BIT) ? 1 : 0); ++ netdev_info(ndev, " STBY (13): %d\n", (reg_val & CAN_CTRL_STBY_BIT) ? 1 : 0); ++ netdev_info(ndev, " TBSEL (15): %d\n", (reg_val & CAN_CTRL_TBSEL_BIT) ? 1 : 0); ++ netdev_info(ndev, " TTBM (20): %d\n", (reg_val & CAN_CTRL_TTBM_BIT) ? 1 : 0); ++ netdev_info(ndev, " TSMODE (21): %d\n", (reg_val & CAN_CTRL_TSMODE_BIT) ? 1 : 0); ++ netdev_info(ndev, " TSNEXT (22): %d\n", (reg_val & CAN_CTRL_TSNEXT_BIT) ? 1 : 0); ++ netdev_info(ndev, " RSTAT (25:24): 0x%02x\n", ++ (u32)((reg_val & CAN_CTRL_RSTAT_NOT_EMPTY_MASKT) >> 24)); ++ netdev_info(ndev, " RREL (28): %d\n", (reg_val & CAN_CTRL_RREL_BIT) ? 1 : 0); ++ ++ reg_val = readl(priv->reg_base + CAN_ERR_STAT); ++ netdev_info(ndev, "(REG02C) ERR_STAT = 0x%08x\n", reg_val); ++ netdev_info(ndev, " EWL (3:0): 0x%02x\n", ++ (u32)((reg_val & CAN_ERR_EWL_MASK) >> CAN_ERR_EWL_BITOFF)); ++ netdev_info(ndev, " AFWL (7:4): 0x%02x\n", ++ (u32)((reg_val & CAN_ERR_AFWL_MASK) >> CAN_ERR_AFWL_BITOFF)); ++ netdev_info(ndev, " ALC (12:8): 0x%02x\n", ++ (u32)((reg_val & CAN_ERR_ALC_MASK) >> CAN_ERR_ALC_BITOFF)); ++ netdev_info(ndev, " KOER (15:13): 0x%02x\n", ++ (u32)((reg_val & CAN_ERR_KOER_MASK) >> CAN_ERR_KOER_BITOFF)); ++ netdev_info(ndev, " 000 : no error\n"); ++ netdev_info(ndev, " 001 : bit error\n"); ++ netdev_info(ndev, " 010 : form error\n"); ++ netdev_info(ndev, " 011 : stuff error\n"); ++ netdev_info(ndev, " 100 : ack error\n"); ++ netdev_info(ndev, " 101 : crc error\n"); ++ netdev_info(ndev, " 110 : other error\n"); ++ netdev_info(ndev, " RECNT (23:16): 0x%02x\n", ++ (u32)((reg_val & CAN_ERR_RECNT_MASK) >> CAN_ERR_RECNT_BITOFF)); ++ netdev_info(ndev, " TECNT (31:24): 0x%02x\n", ++ (u32)((reg_val & CAN_ERR_TECNT_MASK) >> CAN_ERR_TECNT_BITOFF)); ++ ++ for (i = 0; i < 0x50; i += 4) ++ netdev_info(ndev, "REG(%03x): 0x%08x\n", i, readl(priv->reg_base + i)); ++} ++ ++static int aspeed_can_set_bittiming(struct net_device *ndev) ++{ ++ struct aspeed_can_priv *priv = netdev_priv(ndev); ++ struct can_bittiming *bt = &priv->can.bittiming; ++ struct can_bittiming *dbt = &priv->can.data_bittiming; ++ u32 btr0, btr1; ++ u32 can_fd_ssp; ++ ++ /* check whether the CAN controller is in reset mode */ ++ if (!aspeed_can_check_reset_mode(ndev)) { ++ netdev_alert(ndev, ++ "BUG! 
Cannot set bittiming - not in reset mode\n"); ++ return -EPERM; ++ } ++ ++ /* parameter sanity */ ++ if (bt->brp < 1 || bt->prop_seg + bt->phase_seg1 < 1 || ++ bt->phase_seg2 < 1 || bt->sjw < 1) { ++ netdev_alert(ndev, ++ "invalid bittiming parameters\n"); ++ netdev_alert(ndev, ++ "brp: %x, prop: %x, seg1: %x, seg2: %x, sjw: %x\n", ++ bt->brp, bt->prop_seg, bt->phase_seg1, ++ bt->phase_seg2, bt->sjw); ++ return -EPERM; ++ } ++ ++ /* setting prescaler value in PRESC Register */ ++ btr0 = (bt->brp - 1); ++ ++ /* setting time segment 1 in SEG_1 Register */ ++ btr1 = (1 + bt->prop_seg + bt->phase_seg1 - 2); ++ ++ /* Setting Time Segment 2 in SEG_2 Register */ ++ btr1 |= (bt->phase_seg2 - 1) << 16; ++ ++ /* Setting Synchronous jump width in BTR Register */ ++ btr1 |= (bt->sjw - 1) << 24; ++ ++ writel(btr1, priv->reg_base + CAN_AC_SEG); ++ ++ if (dbt->prop_seg != 0 && dbt->phase_seg1 != 0 && ++ dbt->phase_seg2 != 0) { ++ if (bt->brp != dbt->brp) { ++ netdev_alert(ndev, ++ "nominal (%d) and data (%d) prescaler isn't the same\n", ++ bt->brp, dbt->brp); ++ return -EPERM; ++ } ++ ++ if (dbt->sjw < 1) { ++ netdev_alert(ndev, ++ "invalid data sjw %x\n", dbt->sjw); ++ return -EPERM; ++ } ++ ++ /* Setting Time Segment 1 in BTR Register */ ++ btr1 = 1 + dbt->prop_seg + dbt->phase_seg1 - 2; ++ ++ /* Setting Time Segment 2 in BTR Register */ ++ btr1 |= (dbt->phase_seg2 - 1) << 16; ++ ++ /* Setting Synchronous jump width in BTR Register */ ++ btr1 |= (dbt->sjw - 1) << 24; ++ ++ writel(btr1, priv->reg_base + CAN_FD_SEG); ++ ++ /* seg_1 + 1 */ ++ can_fd_ssp = 1 + dbt->prop_seg + dbt->phase_seg1 + 1; ++ ++ btr0 |= can_fd_ssp << 8; ++ } ++ ++ writel(btr0 | 0x10000000, priv->reg_base + CAN_BITITME); ++ ++ return 0; ++} ++ ++static int aspeed_can_chip_start(struct net_device *ndev) ++{ ++ struct aspeed_can_priv *priv = netdev_priv(ndev); ++ int err; ++ ++ /* Check if it is in reset mode */ ++ err = aspeed_can_set_reset_mode(ndev); ++ if (err < 0) ++ return err; ++ ++ err = aspeed_can_set_bittiming(ndev); ++ if (err < 0) ++ return err; ++ ++ /* Always config to FD mode since it is ++ * backward compatibility with CAN2.0B. 
++ */ ++ aspeed_can_fd_init(ndev); ++ ++ err = aspeed_can_exit_reset_mode(ndev); ++ if (err < 0) ++ return err; ++ ++ aspeed_can_err_init(ndev); ++ aspeed_can_interrupt_conf(ndev); ++ ++ aspeed_can_tb_mode_conf(ndev); ++ ++ aspeed_can_loopback_ext_conf(ndev, false); ++ if (priv->flag & ASPEED_CAN_INTERNEL_LOOPBACK) { ++ aspeed_can_loopback_int_conf(ndev, true); ++ aspeed_can_sack_conf(ndev, true); ++ } else { ++ if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) { ++ aspeed_can_loopback_ext_conf(ndev, true); ++ aspeed_can_sack_conf(ndev, true); ++ } ++ aspeed_can_loopback_int_conf(ndev, false); ++ } ++ ++ priv->can.state = CAN_STATE_ERROR_ACTIVE; ++ priv->frame_handle = 0; ++ ++ return 0; ++} ++ ++static int aspeed_can_do_set_mode(struct net_device *ndev, enum can_mode mode) ++{ ++ int ret; ++ ++ switch (mode) { ++ case CAN_MODE_START: ++ ret = aspeed_can_chip_start(ndev); ++ if (ret < 0) { ++ netdev_err(ndev, "aspeed_can_chip_start failed!\n"); ++ return ret; ++ } ++ netif_wake_queue(ndev); ++ break; ++ default: ++ netdev_err(ndev, "unexpect can mode: %d\n", (u32)mode); ++ ret = -EOPNOTSUPP; ++ break; ++ } ++ ++ return ret; ++} ++ ++static void aspeed_can_write_frame(struct net_device *ndev, ++ struct sk_buff *skb) ++{ ++ u32 id; ++ struct canfd_frame *cf = (struct canfd_frame *)skb->data; ++ u32 i; ++ struct aspeed_can_priv *priv = netdev_priv(ndev); ++ u32 buf_ctrl = 0; ++ u32 can_ctrl; ++ u32 can_type; ++ ++ can_ctrl = readl(priv->reg_base + CAN_CTRL); ++ ++ /* Watch carefully on the bit sequence */ ++ if (cf->can_id & CAN_EFF_FLAG) { ++ id = (cf->can_id & CAN_EFF_MASK) << CAN_BUF_ID_EFF_BITOFF; ++ buf_ctrl |= CAN_BUF_IDE_BIT; ++ } else { ++ /* Standard CAN ID format */ ++ id = (cf->can_id & CAN_SFF_MASK) << CAN_BUF_ID_BFF_BITOFF; ++ } ++ ++ if (cf->can_id & CAN_RTR_FLAG) ++ buf_ctrl |= CAN_BUF_RMF_BIT; ++ ++ buf_ctrl |= can_fd_len2dlc(cf->len); ++ ++ if (can_is_canfd_skb(skb)) { ++ buf_ctrl |= CAN_BUF_FDF_BIT; ++ if (cf->flags & CANFD_BRS) ++ buf_ctrl |= CAN_BUF_BRS_BIT; ++ } ++ ++ can_type = priv->frame_handle << CAN_BUF_HANDLE_BITOFF; ++ ++ writel(id, priv->reg_base + CAN_TBUF_ID); ++ writel(buf_ctrl, priv->reg_base + CAN_TBUF_CTL); ++ writel(can_type, priv->reg_base + CAN_TBUF_TYPE); ++ ++ writel(0x0, priv->reg_base + CAN_TBUF_TYPE); ++ writel(0x0, priv->reg_base + CAN_TBUF_ACF); ++ ++ if (cf->can_id & CAN_RTR_FLAG) ++ return; ++ ++ for (i = 0; i < (cf->len / 4 + 3) * 4; i += 4) { ++ writel(*(u32 *)(cf->data + i), ++ priv->reg_base + CAN_TBUF_DATA + i); ++ } ++} ++ ++static int aspeed_can_start_frame_xmit(struct sk_buff *skb, ++ struct net_device *ndev) ++{ ++ struct aspeed_can_priv *priv = netdev_priv(ndev); ++ u32 i = 0; ++ u32 can_ctrl; ++ u32 int_flag; ++ u32 skb_idx; ++ int ret; ++ ++ /* avoid transmitting data finish suddenly */ ++ while (i < 3) { ++ can_ctrl = readl(priv->reg_base + CAN_CTRL); ++ int_flag = readl(priv->reg_base + CAN_INTF); ++ i++; ++ } ++ ++ /* check whether STB or PTB is full */ ++ if ((can_ctrl & CAN_CTRL_TBSEL_BIT) && ++ (can_ctrl & CAN_CTRL_TSFF_BIT)) ++ return -ENOSPC; ++ ++ if (!(can_ctrl & CAN_CTRL_TBSEL_BIT) && ++ ((can_ctrl & CAN_CTRL_TPE_BIT) || ++ (int_flag & CAN_INT_TPIF_BIT))) ++ return -ENOSPC; ++ ++ if (priv->frame_handle == 0x100) ++ priv->frame_handle = 0; ++ ++ /* Use skb idex to check whether ++ * the same handle already in STB. 
++ */ ++ skb_idx = aspeed_can_get_skb_idx(ndev, priv->frame_handle); ++ if (skb_idx != STB_INVALID_SKB_IDX) { ++ netdev_err(ndev, "repeat handle %d\n", priv->frame_handle); ++ return -ENOSPC; ++ } ++ ++ aspeed_can_write_frame(ndev, skb); ++ ++ if (can_ctrl & CAN_CTRL_TBSEL_BIT) { ++ /* STB */ ++ ret = can_put_echo_skb(skb, ndev, priv->head_ptr->skb_idx, 0); ++ if (ret) { ++ netdev_err(ndev, "fail to put skb for stb %d\n", ret); ++ return ret; ++ } ++ ++ priv->head_ptr->handle = priv->frame_handle; ++ priv->head_ptr = priv->head_ptr->next; ++ ++ /* STB is full */ ++ if (priv->head_ptr == priv->tail_ptr) ++ netif_stop_queue(ndev); ++ ++ aspeed_can_set_bit(priv, CAN_CTRL, CAN_CTRL_TSNEXT_BIT); ++ ++ /* no frame is transmitting or there is no unhandled finish flag */ ++ if ((priv->stb_mode_policy & STB_TX_MODE_ONE) && ++ !(can_ctrl & CAN_CTRL_TSONE_BIT) && ++ !(int_flag & CAN_INT_TSIF_BIT)) ++ aspeed_can_set_bit(priv, CAN_CTRL, CAN_CTRL_TSONE_BIT); ++ else if ((priv->stb_mode_policy & STB_TX_MODE_ALL) && ++ !(can_ctrl & CAN_CTRL_TSALL_BIT) && ++ !(int_flag & CAN_INT_TSIF_BIT)) ++ aspeed_can_set_bit(priv, CAN_CTRL, CAN_CTRL_TSALL_BIT); ++ } else { ++ /* PTB */ ++ ret = can_put_echo_skb(skb, ndev, 0, 0); ++ if (ret) { ++ netdev_err(ndev, "fail to put skb for ptb %d\n", ret); ++ return ret; ++ } ++ ++ netif_stop_queue(ndev); ++ aspeed_can_set_bit(priv, CAN_CTRL, CAN_CTRL_TPE_BIT); ++ } ++ ++ priv->frame_handle++; ++ ++ return 0; ++} ++ ++static netdev_tx_t aspeed_can_start_xmit(struct sk_buff *skb, ++ struct net_device *ndev) ++{ ++ int ret; ++ struct aspeed_can_priv *priv = netdev_priv(ndev); ++ unsigned long flags; ++ ++ if (can_dev_dropped_skb(ndev, skb)) ++ return NET_XMIT_DROP; ++ ++ spin_lock_irqsave(&priv->tx_lock, flags); ++ ++ ret = aspeed_can_start_frame_xmit(skb, ndev); ++ ++ spin_unlock_irqrestore(&priv->tx_lock, flags); ++ ++ if (ret) { ++ netdev_err(ndev, "Fail to transmit data!\n"); ++ netif_stop_queue(ndev); ++ return NET_XMIT_DROP; ++ } ++ ++ return NETDEV_TX_OK; ++} ++ ++static int aspeed_can_rx(struct net_device *ndev) ++{ ++ struct aspeed_can_priv *priv = netdev_priv(ndev); ++ struct net_device_stats *stats = &ndev->stats; ++ struct canfd_frame *cf; ++ struct sk_buff *skb; ++ u32 data; ++ u32 rx_stat; ++ u32 buf_ctrl_reg; ++ u32 reg_val; ++ u32 i; ++ ++ rx_stat = readl(priv->reg_base + CAN_CTRL); ++ if (!(rx_stat & CAN_CTRL_RSTAT_NOT_EMPTY_MASKT)) ++ return 0; ++ ++ buf_ctrl_reg = readl(priv->reg_base + CAN_RBUF_CTL); ++ if (buf_ctrl_reg & CAN_BUF_FDF_BIT) ++ skb = alloc_canfd_skb(ndev, &cf); ++ else ++ skb = alloc_can_skb(ndev, (struct can_frame **)&cf); ++ ++ if (!skb) { ++ stats->rx_dropped++; ++ return 0; ++ } ++ ++ reg_val = readl(priv->reg_base + CAN_RBUF_ID); ++ if (buf_ctrl_reg & CAN_BUF_IDE_BIT) { ++ cf->can_id = reg_val & CAN_BUF_ID_EFF_MASK; ++ cf->can_id |= CAN_EFF_FLAG; ++ } else { ++ cf->can_id = (reg_val & CAN_BUF_ID_BFF_MASK) >> ++ CAN_BUF_ID_BFF_BITOFF; ++ } ++ ++ if (buf_ctrl_reg & CAN_BUF_RMF_BIT) ++ cf->can_id |= CAN_RTR_FLAG; ++ ++ if (buf_ctrl_reg & CAN_BUF_FDF_BIT) ++ cf->len = can_fd_dlc2len(buf_ctrl_reg & CAN_BUF_DLC_MASK); ++ else ++ cf->len = can_cc_dlc2len(buf_ctrl_reg & CAN_BUF_DLC_MASK); ++ ++ /* Check the frame received is FD or not*/ ++ for (i = 0; i < cf->len; i += 4) { ++ data = readl(priv->reg_base + CAN_RBUF_DATA + i); ++ *(u32 *)(cf->data + i) = data; ++ } ++ ++ if (!(cf->can_id & CAN_RTR_FLAG)) ++ stats->rx_bytes += cf->len; ++ ++ stats->rx_packets++; ++ ++ /* release frame */ ++ aspeed_can_set_bit(priv, CAN_CTRL, CAN_CTRL_RREL_BIT); ++ ++ 
netif_receive_skb(skb); ++ ++ return 1; ++} ++ ++static enum can_state aspeed_can_current_error_state(struct net_device *ndev) ++{ ++ struct aspeed_can_priv *priv = netdev_priv(ndev); ++ u32 status; ++ u32 rx_cnt; ++ u32 tx_cnt; ++ u32 ctrl; ++ ++ ctrl = readl(priv->reg_base + CAN_CTRL); ++ status = readl(priv->reg_base + CAN_INTF); ++ rx_cnt = (readl(priv->reg_base + CAN_ERR_STAT) & CAN_ERR_RECNT_MASK) >> ++ CAN_ERR_RECNT_BITOFF; ++ tx_cnt = (readl(priv->reg_base + CAN_ERR_STAT) & CAN_ERR_TECNT_MASK) >> ++ CAN_ERR_TECNT_BITOFF; ++ ++ if (ctrl & CAN_CTRL_BUSOFF_BIT) ++ return CAN_STATE_BUS_OFF; ++ else if ((status & CAN_EPASS_BIT) == CAN_EPASS_BIT) ++ return CAN_STATE_ERROR_PASSIVE; ++ else if (rx_cnt > 96 || tx_cnt > 96) ++ return CAN_STATE_ERROR_WARNING; ++ else ++ return CAN_STATE_ERROR_ACTIVE; ++} ++ ++static void aspeed_can_set_error_state(struct net_device *ndev, ++ enum can_state new_state, ++ struct can_frame *cf) ++{ ++ struct aspeed_can_priv *priv = netdev_priv(ndev); ++ u32 ecr = readl(priv->reg_base + CAN_ERR_STAT); ++ u32 txerr = (ecr & CAN_ERR_RECNT_MASK) >> CAN_ERR_RECNT_BITOFF; ++ u32 rxerr = (ecr & CAN_ERR_TECNT_MASK) >> CAN_ERR_TECNT_BITOFF; ++ enum can_state tx_state = txerr >= rxerr ? new_state : 0; ++ enum can_state rx_state = txerr <= rxerr ? new_state : 0; ++ ++ /* non-ERROR states are handled elsewhere */ ++ if (WARN_ON(new_state > CAN_STATE_ERROR_PASSIVE)) ++ return; ++ ++ can_change_state(ndev, cf, tx_state, rx_state); ++ ++ if (cf) { ++ cf->can_id |= CAN_ERR_CNT; ++ cf->data[6] = txerr; ++ cf->data[7] = rxerr; ++ } ++} ++ ++static void aspeed_can_update_error_state_after_rxtx(struct net_device *ndev) ++{ ++ struct aspeed_can_priv *priv = netdev_priv(ndev); ++ enum can_state old_state = priv->can.state; ++ enum can_state new_state; ++ ++ /* changing error state due to successful frame RX/TX can only ++ * occur from these states ++ */ ++ if (old_state != CAN_STATE_ERROR_WARNING && ++ old_state != CAN_STATE_ERROR_PASSIVE) ++ return; ++ ++ new_state = aspeed_can_current_error_state(ndev); ++ ++ if (new_state != old_state) { ++ struct sk_buff *skb; ++ struct can_frame *cf; ++ ++ skb = alloc_can_err_skb(ndev, &cf); ++ ++ aspeed_can_set_error_state(ndev, new_state, skb ? 
cf : NULL); ++ ++ if (skb) ++ netif_rx(skb); ++ } ++} ++ ++static void aspeed_can_err_interrupt(struct net_device *ndev, u32 isr) ++{ ++ struct aspeed_can_priv *priv = netdev_priv(ndev); ++ struct net_device_stats *stats = &ndev->stats; ++ struct can_frame cf = { }; ++ u32 err; ++ u32 ctrl; ++ u32 koer; ++ u32 recnt; ++ u32 tecnt; ++ ++ netdev_err(ndev, "in error interrupt.\n"); ++ ++ aspeed_can_reg_dump(ndev); ++ ++ err = readl(priv->reg_base + CAN_ERR_STAT); ++ ctrl = readl(priv->reg_base + CAN_CTRL); ++ ++ koer = (err & CAN_ERR_KOER_MASK) >> CAN_ERR_KOER_BITOFF; ++ recnt = (err & CAN_ERR_RECNT_MASK) >> CAN_ERR_RECNT_BITOFF; ++ tecnt = (err & CAN_ERR_TECNT_MASK) >> CAN_ERR_TECNT_BITOFF; ++ ++ if (ctrl & CAN_CTRL_BUSOFF_BIT) { ++ priv->can.state = CAN_STATE_BUS_OFF; ++ priv->can.can_stats.bus_off++; ++ /* Leave device in Config Mode in bus-off state */ ++ aspeed_can_set_bit(priv, CAN_CTRL, CAN_CTRL_RST_BIT); ++ can_bus_off(ndev); ++ cf.can_id |= CAN_ERR_BUSOFF; ++ } else { ++ enum can_state new_state = aspeed_can_current_error_state(ndev); ++ ++ if (new_state != priv->can.state) ++ aspeed_can_set_error_state(ndev, new_state, &cf); ++ } ++ ++ if (isr & CAN_INT_ALIF_BIT) { ++ priv->can.can_stats.arbitration_lost++; ++ cf.can_id |= CAN_ERR_LOSTARB; ++ cf.data[0] = CAN_ERR_LOSTARB_UNSPEC; ++ } ++ ++ if (isr & CAN_INT_ROIF_BIT) { ++ stats->rx_over_errors++; ++ stats->rx_errors++; ++ cf.can_id |= CAN_ERR_CRTL; ++ cf.data[1] |= CAN_ERR_CRTL_RX_OVERFLOW; ++ } ++ ++ /* Check for error interrupt */ ++ if (isr & CAN_INT_BEIF_BIT) { ++ bool berr_reporting = false; ++ ++ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) { ++ berr_reporting = true; ++ cf.can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; ++ } ++ ++ if (koer == KOER_ACK_ERROR_MASK) { ++ netdev_err(ndev, "ACK error exists\n"); ++ stats->tx_errors++; ++ if (berr_reporting) { ++ cf.can_id |= CAN_ERR_ACK; ++ cf.data[3] = CAN_ERR_PROT_LOC_ACK; ++ } ++ } ++ ++ if (koer == KOER_BIT_ERROR_MASK) { ++ netdev_err(ndev, "BIT error exists\n"); ++ stats->tx_errors++; ++ if (berr_reporting) { ++ cf.can_id |= CAN_ERR_PROT; ++ cf.data[2] = CAN_ERR_PROT_BIT; ++ } ++ } ++ ++ if (koer == KOER_STUFF_ERROR_MASK) { ++ netdev_err(ndev, "STUFF error exists\n"); ++ stats->rx_errors++; ++ if (berr_reporting) { ++ cf.can_id |= CAN_ERR_PROT; ++ cf.data[2] = CAN_ERR_PROT_STUFF; ++ } ++ } ++ ++ if (koer == KOER_FORM_ERROR_MASK) { ++ netdev_err(ndev, "FORM error exists\n"); ++ stats->rx_errors++; ++ if (berr_reporting) { ++ cf.can_id |= CAN_ERR_PROT; ++ cf.data[2] = CAN_ERR_PROT_FORM; ++ } ++ } ++ ++ if (koer == KOER_CRC_ERROR_MASK) { ++ netdev_err(ndev, "CRC error exists\n"); ++ stats->rx_errors++; ++ if (berr_reporting) { ++ cf.can_id |= CAN_ERR_PROT; ++ cf.data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; ++ } ++ } ++ ++ priv->can.can_stats.bus_error++; ++ } ++ ++ if (cf.can_id) { ++ struct can_frame *skb_cf; ++ struct sk_buff *skb = alloc_can_err_skb(ndev, &skb_cf); ++ ++ if (skb) { ++ skb_cf->can_id |= cf.can_id; ++ memcpy(skb_cf->data, cf.data, CAN_ERR_DLC); ++ netif_rx(skb); ++ } ++ } ++} ++ ++static int aspeed_can_rx_poll(struct napi_struct *napi, int quota) ++{ ++ struct net_device *ndev = napi->dev; ++ struct aspeed_can_priv *priv = netdev_priv(ndev); ++ int work_done = 0; ++ u32 rx_stat; ++ u32 ier; ++ ++ rx_stat = readl(priv->reg_base + CAN_CTRL); ++ ++ while ((rx_stat & CAN_CTRL_RSTAT_NOT_EMPTY_MASKT) != 0 && ++ (work_done < quota)) { ++ work_done += aspeed_can_rx(ndev); ++ rx_stat = readl(priv->reg_base + CAN_CTRL); ++ } ++ ++ if (work_done) ++ 
aspeed_can_update_error_state_after_rxtx(ndev); ++ ++ if (work_done < quota) { ++ if (napi_complete_done(napi, work_done)) { ++ ier = readl(priv->reg_base + CAN_INTE); ++ ier |= CAN_INT_RIE_BIT; ++ writel(ier, priv->reg_base + CAN_INTE); ++ } ++ } ++ ++ return work_done; ++} ++ ++static void aspeed_can_tx_interrupt(struct net_device *ndev, u32 intr_flag) ++{ ++ struct aspeed_can_priv *priv = netdev_priv(ndev); ++ struct net_device_stats *stats = &ndev->stats; ++ u32 frames_in_fifo; ++ unsigned long flags; ++ u32 can_ctrl; ++ u32 stauts_2; ++ u32 handle; ++ u32 skb_idx; ++ int ret; ++ ++ spin_lock_irqsave(&priv->tx_lock, flags); ++ ++ /* PTB */ ++ if (intr_flag & CAN_INT_TPIF_BIT) { ++ aspeed_can_clr_irq_bits(priv, CAN_INTF, CAN_INT_TPIF_BIT); ++ stats->tx_bytes += can_get_echo_skb(ndev, 0, NULL); ++ goto exit; ++ } ++ ++ /* STB */ ++ can_ctrl = readl(priv->reg_base + CAN_CTRL); ++ stauts_2 = readl(priv->reg_base + CAN_TSTAT); ++ ++ switch (priv->stb_mode_policy) { ++ case (STB_TX_MODE_ONE | STB_POLICY_PRIO): ++ aspeed_can_clr_irq_bits(priv, CAN_INTF, CAN_INT_TSIF_BIT); ++ ++ handle = (stauts_2 & CAN_TSTAT2_HANDLE_MASK) >> ++ CAN_TSTAT2_HANDLE_BITOFF; ++ skb_idx = aspeed_can_get_skb_idx(ndev, handle); ++ if (skb_idx == STB_INVALID_SKB_IDX) { ++ netdev_err(ndev, "fail to get skb idx (%x)\n", ++ STB_TX_MODE_ONE | STB_POLICY_PRIO); ++ } else { ++ stats->tx_bytes += can_get_echo_skb(ndev, skb_idx, NULL); ++ stats->tx_packets++; ++ ret = aspeed_can_drop_ring_obj(ndev, handle); ++ /* priv->tail_ptr = priv->tail_ptr.next; */ ++ if (ret < 0) { ++ netdev_err(ndev, "fail to drop a ring obj (%x)\n", ++ STB_TX_MODE_ONE | STB_POLICY_PRIO); ++ } ++ } ++ ++ /* something still in STB */ ++ if ((can_ctrl & CAN_CTRL_TSSTAT_MASK) != 0x0) ++ aspeed_can_set_bit(priv, CAN_CTRL, CAN_CTRL_TSONE_BIT); ++ break; ++ ++ case (STB_TX_MODE_ALL | STB_POLICY_FIFO): ++ case (STB_TX_MODE_ALL | STB_POLICY_PRIO): ++ ++ aspeed_can_clr_irq_bits(priv, CAN_INTF, CAN_INT_TSIF_BIT); ++ ++ frames_in_fifo = aspeed_can_get_frame_num(ndev); ++ ++ /* Potential situation: A frame is submitted before this ISR. ++ * That new frame waits for transmission. ++ * This frame should be handled in the next ISR. 
++ */ ++ if (can_ctrl & CAN_CTRL_TSSTAT_MASK) ++ frames_in_fifo--; ++ ++ while (frames_in_fifo != 0) { ++ stats->tx_bytes += can_get_echo_skb(ndev, ++ priv->tail_ptr->skb_idx, ++ NULL); ++ priv->tail_ptr->handle = STB_INVALID_HANDLE_VAL; ++ priv->tail_ptr = priv->tail_ptr->next; ++ stats->tx_packets++; ++ frames_in_fifo--; ++ } ++ ++ /* something still in STB */ ++ if (can_ctrl & CAN_CTRL_TSSTAT_MASK) ++ aspeed_can_set_bit(priv, CAN_CTRL, CAN_CTRL_TSALL_BIT); ++ ++ break; ++ default: ++ /* including STB_TX_MODE_ONE and STB_POLICY_FIFO mode */ ++ aspeed_can_clr_irq_bits(priv, CAN_INTF, CAN_INT_TSIF_BIT); ++ ++ stats->tx_bytes += can_get_echo_skb(ndev, ++ priv->tail_ptr->skb_idx, ++ NULL); ++ priv->tail_ptr = priv->tail_ptr->next; ++ stats->tx_packets++; ++ ++ /* something in STB */ ++ if ((can_ctrl & CAN_CTRL_TSSTAT_MASK) != 0x0) ++ aspeed_can_set_bit(priv, CAN_CTRL, CAN_CTRL_TSONE_BIT); ++ } ++ ++exit: ++ netif_wake_queue(ndev); ++ ++ spin_unlock_irqrestore(&priv->tx_lock, flags); ++ ++ aspeed_can_update_error_state_after_rxtx(ndev); ++} ++ ++static irqreturn_t aspeed_can_interrupt(int irq, void *dev_id) ++{ ++ struct net_device *ndev = (struct net_device *)dev_id; ++ struct aspeed_can_priv *priv = netdev_priv(ndev); ++ u32 isr; ++ u32 ier; ++ u32 isr_errors; ++ u32 rx_int_mask = CAN_INT_RIF_BIT; ++ ++ isr = readl(priv->reg_base + CAN_INTF); ++ if (!isr) ++ return IRQ_NONE; ++ ++ /* Check for Tx interrupt and Processing it */ ++ if (isr & (CAN_INT_TPIF_BIT | CAN_INT_TSIF_BIT)) ++ aspeed_can_tx_interrupt(ndev, isr); ++ ++ if (isr & CAN_INT_RAFIF_BIT) { ++ netdev_warn(ndev, "Receive buffer is almost full\n"); ++ aspeed_can_clr_irq_bits(priv, CAN_INTF, CAN_INT_RAFIF_BIT); ++ } ++ ++ if (isr & CAN_INT_RFIF_BIT) { ++ netdev_warn(ndev, "Receive buffer is full\n"); ++ aspeed_can_clr_irq_bits(priv, CAN_INTF, CAN_INT_RFIF_BIT); ++ } ++ ++ /* Check for the type of error interrupt and Processing it */ ++ isr_errors = isr & (CAN_INT_EIF_BIT | CAN_INT_ROIF_BIT | ++ CAN_INT_BEIF_BIT | CAN_INT_ALIF_BIT | ++ CAN_INT_EPIF_BIT | CAN_INT_EWARN_BIT); ++ if (isr_errors) { ++ aspeed_can_clr_irq_bits(priv, CAN_INTF, isr_errors); ++ aspeed_can_err_interrupt(ndev, isr); ++ } ++ ++ /* Check for the type of receive interrupt and Processing it */ ++ if (isr & rx_int_mask) { ++ aspeed_can_clr_irq_bits(priv, CAN_INTF, rx_int_mask); ++ ier = readl(priv->reg_base + CAN_INTE); ++ ier &= ~rx_int_mask; /* CAN_INT_RIE_BIT */ ++ writel(ier, priv->reg_base + CAN_INTE); ++ napi_schedule(&priv->napi); ++ } ++ ++ return IRQ_HANDLED; ++} ++ ++static void aspeed_can_chip_stop(struct net_device *ndev) ++{ ++ struct aspeed_can_priv *priv = netdev_priv(ndev); ++ int ret; ++ ++ /* Disable interrupts and leave the can in configuration mode */ ++ ret = aspeed_can_set_reset_mode(ndev); ++ if (ret < 0) ++ netdev_dbg(ndev, "aspeed_can_set_reset_mode() Failed\n"); ++ ++ priv->can.state = CAN_STATE_STOPPED; ++} ++ ++static int aspeed_can_open(struct net_device *ndev) ++{ ++ struct aspeed_can_priv *priv = netdev_priv(ndev); ++ int ret; ++ ++ ret = pm_runtime_get_sync(priv->dev); ++ if (ret < 0) { ++ netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n", ++ __func__, ret); ++ goto err; ++ } ++ ++ ret = request_irq(ndev->irq, aspeed_can_interrupt, 0, ++ ndev->name, ndev); ++ if (ret < 0) { ++ netdev_err(ndev, "irq allocation for CAN failed\n"); ++ goto err; ++ } ++ ++ /* Set chip into reset mode */ ++ ret = aspeed_can_set_reset_mode(ndev); ++ if (ret < 0) { ++ netdev_err(ndev, "mode resetting failed!\n"); ++ goto err_irq; ++ } ++ ++ /* Common open 
*/ ++ ret = open_candev(ndev); ++ if (ret) ++ goto err_irq; ++ ++ ret = aspeed_can_chip_start(ndev); ++ if (ret < 0) { ++ netdev_err(ndev, "aspeed_can_chip_start failed!\n"); ++ goto err_candev; ++ } ++ ++ napi_enable(&priv->napi); ++ netif_start_queue(ndev); ++ ++ return 0; ++ ++err_candev: ++ close_candev(ndev); ++err_irq: ++ free_irq(ndev->irq, ndev); ++err: ++ pm_runtime_put(priv->dev); ++ ++ return ret; ++} ++ ++static int aspeed_can_close(struct net_device *ndev) ++{ ++ struct aspeed_can_priv *priv = netdev_priv(ndev); ++ ++ netif_stop_queue(ndev); ++ napi_disable(&priv->napi); ++ aspeed_can_chip_stop(ndev); ++ free_irq(ndev->irq, ndev); ++ close_candev(ndev); ++ pm_runtime_put(priv->dev); ++ ++ return 0; ++} ++ ++static int aspeed_can_get_berr_counter(const struct net_device *ndev, ++ struct can_berr_counter *bec) ++{ ++ struct aspeed_can_priv *priv = netdev_priv(ndev); ++ int ret; ++ ++ ret = pm_runtime_get_sync(priv->dev); ++ if (ret < 0) { ++ netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n", ++ __func__, ret); ++ pm_runtime_put(priv->dev); ++ return ret; ++ } ++ ++ bec->rxerr = (readl(priv->reg_base + CAN_ERR_STAT) & ++ CAN_ERR_RECNT_MASK) >> CAN_ERR_RECNT_BITOFF; ++ bec->txerr = (readl(priv->reg_base + CAN_ERR_STAT) & ++ CAN_ERR_TECNT_MASK) >> CAN_ERR_TECNT_BITOFF; ++ ++ pm_runtime_put(priv->dev); ++ ++ return 0; ++} ++ ++static int aspeed_can_get_auto_tdcv(const struct net_device *ndev, u32 *tdcv) ++{ ++ struct aspeed_can_priv *priv = netdev_priv(ndev); ++ u32 reg; ++ ++ reg = readl(priv->reg_base + CAN_BITITME); ++ *tdcv = (reg & CAN_TIMING_FD_SSPOFF_MASK) >> CAN_TIMING_FD_SSPOFF_BITOFF; ++ ++ return 0; ++} ++ ++static const struct net_device_ops aspeed_can_netdev_ops = { ++ .ndo_open = aspeed_can_open, ++ .ndo_stop = aspeed_can_close, ++ .ndo_start_xmit = aspeed_can_start_xmit, ++ .ndo_change_mtu = can_change_mtu, ++}; ++ ++static const struct ethtool_ops aspeed_can_ethtool_ops = { ++ .get_ts_info = ethtool_op_get_ts_info, ++}; ++ ++static int __maybe_unused aspeed_can_suspend(struct device *dev) ++{ ++ struct net_device *ndev = dev_get_drvdata(dev); ++ ++ if (netif_running(ndev)) { ++ netif_stop_queue(ndev); ++ netif_device_detach(ndev); ++ aspeed_can_chip_stop(ndev); ++ } ++ ++ return pm_runtime_force_suspend(dev); ++} ++ ++static int __maybe_unused aspeed_can_resume(struct device *dev) ++{ ++ struct net_device *ndev = dev_get_drvdata(dev); ++ int ret; ++ ++ ret = pm_runtime_force_resume(dev); ++ if (ret) { ++ dev_err(dev, "pm_runtime_force_resume failed on resume\n"); ++ return ret; ++ } ++ ++ if (netif_running(ndev)) { ++ ret = aspeed_can_chip_start(ndev); ++ if (ret) { ++ dev_err(dev, "aspeed_can_chip_start failed on resume\n"); ++ return ret; ++ } ++ ++ netif_device_attach(ndev); ++ netif_start_queue(ndev); ++ } ++ ++ return 0; ++} ++ ++static int __maybe_unused aspeed_can_runtime_suspend(struct device *dev) ++{ ++ struct net_device *ndev = dev_get_drvdata(dev); ++ struct aspeed_can_priv *priv = netdev_priv(ndev); ++ ++ clk_disable_unprepare(priv->clk); ++ ++ return 0; ++} ++ ++static int __maybe_unused aspeed_can_runtime_resume(struct device *dev) ++{ ++ struct net_device *ndev = dev_get_drvdata(dev); ++ struct aspeed_can_priv *priv = netdev_priv(ndev); ++ int ret; ++ ++ ret = clk_prepare_enable(priv->clk); ++ if (ret) { ++ dev_err(dev, "Cannot enable clock.\n"); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static const struct dev_pm_ops aspeed_can_dev_pm_ops = { ++ SET_SYSTEM_SLEEP_PM_OPS(aspeed_can_suspend, aspeed_can_resume) ++ 
SET_RUNTIME_PM_OPS(aspeed_can_runtime_suspend, aspeed_can_runtime_resume, NULL) ++}; ++ ++/* Match table for OF platform binding */ ++static const struct of_device_id aspeed_can_of_match[] = { ++ { .compatible = "aspeed,canfd", .data = NULL }, ++ { /* end of list */ }, ++}; ++MODULE_DEVICE_TABLE(of, aspeed_can_of_match); ++ ++static int aspeed_can_probe(struct platform_device *pdev) ++{ ++ struct net_device *ndev; ++ struct aspeed_can_priv *priv; ++ int ret; ++ /* Fixed to temporarily. */ ++ u32 rx_max = 3; ++ u32 can_clk; ++ ++ ndev = alloc_candev(sizeof(struct aspeed_can_priv), 4); ++ if (!ndev) ++ return -ENOMEM; ++ ++ priv = netdev_priv(ndev); ++ ++ priv->dev = &pdev->dev; ++ priv->reg_base = devm_platform_ioremap_resource(pdev, 0); ++ if (IS_ERR(priv->reg_base)) { ++ ret = PTR_ERR(priv->reg_base); ++ goto err; ++ }; ++ ++ priv->can.bittiming_const = &aspeed_can_bittiming_const; ++ priv->can.data_bittiming_const = &aspeed_canfd_bittiming_const; ++ priv->can.do_set_mode = aspeed_can_do_set_mode; ++ priv->can.do_get_berr_counter = aspeed_can_get_berr_counter; ++ priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | ++ CAN_CTRLMODE_BERR_REPORTING | ++ CAN_CTRLMODE_FD | ++ CAN_CTRLMODE_CC_LEN8_DLC | ++ CAN_CTRLMODE_TDC_AUTO; ++ priv->can.do_get_auto_tdcv = aspeed_can_get_auto_tdcv; ++ ++ priv->tx_max = 3; ++ spin_lock_init(&priv->tx_lock); ++ ++ /* Get IRQ for the device */ ++ ret = platform_get_irq(pdev, 0); ++ if (ret < 0) ++ goto err_free; ++ ++ ndev->irq = ret; ++ ++ /* We support local echo */ ++ ndev->flags |= IFF_ECHO; ++ ++ platform_set_drvdata(pdev, ndev); ++ SET_NETDEV_DEV(ndev, &pdev->dev); ++ ndev->netdev_ops = &aspeed_can_netdev_ops; ++ ndev->ethtool_ops = &aspeed_can_ethtool_ops; ++ ++ /* Getting the CAN can_clk info */ ++ priv->clk = devm_clk_get(&pdev->dev, NULL); ++ if (IS_ERR(priv->clk)) { ++ dev_err(&pdev->dev, "missing clock\n"); ++ return PTR_ERR(priv->clk); ++ } ++ ++ can_clk = clk_get_rate(priv->clk); ++ if (!can_clk) { ++ dev_err(&pdev->dev, "invalid clock\n"); ++ return -EINVAL; ++ } ++ ++ ret = clk_prepare_enable(priv->clk); ++ if (ret) { ++ dev_err(&pdev->dev, "can not enable the clock\n"); ++ return ret; ++ } ++ ++ priv->can.clock.freq = can_clk; ++ ++ priv->tb_mode = PTB_MODE; ++ if (of_property_read_bool(priv->dev->of_node, "can-stb-mode")) ++ priv->tb_mode = STB_MODE; ++ ++ priv->stb_mode_policy = STB_TX_MODE_ONE; ++ if (of_property_read_bool(priv->dev->of_node, "can-stb-tx-all")) ++ priv->stb_mode_policy = STB_TX_MODE_ALL; ++ ++ if (of_property_read_bool(priv->dev->of_node, "can-stb-priority")) ++ priv->stb_mode_policy |= STB_POLICY_PRIO; ++ else ++ priv->stb_mode_policy |= STB_POLICY_FIFO; ++ ++ priv->stb_ring = kzalloc(sizeof(*priv->stb_ring) * ++ STB_IDX_RING_SZ, GFP_KERNEL); ++ aspeed_can_stb_ring_obj_init(ndev); ++ ++ if (of_property_read_bool(priv->dev->of_node, "can-internal-loopback")) ++ priv->flag |= ASPEED_CAN_INTERNEL_LOOPBACK; ++ ++ priv->reset = devm_reset_control_get_exclusive(&pdev->dev, NULL); ++ if (IS_ERR(priv->reset)) ++ return PTR_ERR(priv->reset); ++ ++ reset_control_deassert(priv->reset); ++ ++ ret = aspeed_can_set_reset_mode(ndev); ++ if (ret < 0) ++ goto err; ++ ++ pm_runtime_enable(&pdev->dev); ++ ret = pm_runtime_get_sync(&pdev->dev); ++ if (ret < 0) { ++ netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n", ++ __func__, ret); ++ goto err_disableclks; ++ } ++ ++ netif_napi_add_weight(ndev, &priv->napi, aspeed_can_rx_poll, rx_max); ++ ++ ret = register_candev(ndev); ++ if (ret) { ++ dev_err(&pdev->dev, "fail to register failed 
(err=%d)\n", ret); ++ goto err_disableclks; ++ } ++ ++ pm_runtime_put(&pdev->dev); ++ ++ netdev_dbg(ndev, "reg_base = 0x%p, irq = %d, clock = %d\n", ++ priv->reg_base, ndev->irq, priv->can.clock.freq); ++ ++ return 0; ++ ++err_disableclks: ++ pm_runtime_put(priv->dev); ++ pm_runtime_disable(&pdev->dev); ++err_free: ++ free_candev(ndev); ++err: ++ kfree(priv->stb_ring); ++ ++ return ret; ++} ++ ++static void aspeed_can_remove(struct platform_device *pdev) ++{ ++ struct net_device *ndev = platform_get_drvdata(pdev); ++ struct aspeed_can_priv *priv = netdev_priv(ndev); ++ ++ reset_control_assert(priv->reset); ++ unregister_candev(ndev); ++ pm_runtime_disable(&pdev->dev); ++ kfree(priv->stb_ring); ++ ++ free_candev(ndev); ++} ++ ++static struct platform_driver aspeed_can_driver = { ++ .probe = aspeed_can_probe, ++ .remove = aspeed_can_remove, ++ .driver = { ++ .name = DRIVER_NAME, ++ .pm = &aspeed_can_dev_pm_ops, ++ .of_match_table = aspeed_can_of_match, ++ }, ++}; ++ ++module_platform_driver(aspeed_can_driver); ++ ++MODULE_LICENSE("GPL"); ++MODULE_AUTHOR("Chin-Ting Kuo "); ++MODULE_DESCRIPTION("ASPEED CAN interface"); +diff --git a/drivers/net/ethernet/faraday/Kconfig b/drivers/net/ethernet/faraday/Kconfig +--- a/drivers/net/ethernet/faraday/Kconfig 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/net/ethernet/faraday/Kconfig 2025-12-23 10:16:21.145032318 +0000 +@@ -6,7 +6,7 @@ + config NET_VENDOR_FARADAY + bool "Faraday devices" + default y +- depends on ARM || COMPILE_TEST ++ depends on ARM || ARM64 || COMPILE_TEST + help + If you have a network (Ethernet) card belonging to this class, say Y. + +@@ -28,11 +28,10 @@ + + config FTGMAC100 + tristate "Faraday FTGMAC100 Gigabit Ethernet support" +- depends on ARM || COMPILE_TEST +- depends on !64BIT || BROKEN ++ depends on ARM || ARM64 || COMPILE_TEST + select PHYLIB + select FIXED_PHY +- select MDIO_ASPEED if MACH_ASPEED_G6 ++ select MDIO_ASPEED if ARCH_ASPEED + select CRC32 + help + This driver supports the FTGMAC100 Gigabit Ethernet controller +diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c +--- a/drivers/net/ethernet/faraday/ftgmac100.c 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/net/ethernet/faraday/ftgmac100.c 2025-12-23 10:16:21.145032318 +0000 +@@ -9,6 +9,7 @@ + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + + #include ++#include + #include + #include + #include +@@ -19,6 +20,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -98,6 +100,7 @@ + struct work_struct reset_task; + struct mii_bus *mii_bus; + struct clk *clk; ++ struct reset_control *rst; + + /* AST2500/AST2600 RMII ref clock gate */ + struct clk *rclk; +@@ -119,6 +122,9 @@ + /* Misc */ + bool need_mac_restart; + bool is_aspeed; ++ ++ /* AST2700 SGMII */ ++ struct phy *sgmii; + }; + + static int ftgmac100_reset_mac(struct ftgmac100 *priv, u32 maccr) +@@ -148,6 +154,23 @@ + { + u32 maccr = 0; + ++ /* RMII needs SCU reset to clear status */ ++ if (priv->netdev->phydev->interface == PHY_INTERFACE_MODE_RMII) { ++ int err; ++ ++ err = reset_control_assert(priv->rst); ++ if (err) { ++ dev_err(priv->dev, "Failed to reset mac (%d)\n", err); ++ return err; ++ } ++ usleep_range(10000, 20000); ++ err = reset_control_deassert(priv->rst); ++ if (err) { ++ dev_err(priv->dev, "Failed to deassert mac reset (%d)\n", err); ++ return err; ++ } ++ } ++ + switch (priv->cur_speed) { + case SPEED_10: + case 0: /* no link */ +@@ -265,10 +288,12 @@ + iowrite32(reg, priv->base + FTGMAC100_OFFSET_ISR); + + /* 
Setup RX ring buffer base */ +- iowrite32(priv->rxdes_dma, priv->base + FTGMAC100_OFFSET_RXR_BADR); ++ iowrite32(lower_32_bits(priv->rxdes_dma), priv->base + FTGMAC100_OFFSET_RXR_BADR); ++ iowrite32(upper_32_bits(priv->rxdes_dma), priv->base + FTGMAC100_OFFSET_RXR_BADDR_HIGH); + + /* Setup TX ring buffer base */ +- iowrite32(priv->txdes_dma, priv->base + FTGMAC100_OFFSET_NPTXR_BADR); ++ iowrite32(lower_32_bits(priv->txdes_dma), priv->base + FTGMAC100_OFFSET_NPTXR_BADR); ++ iowrite32(upper_32_bits(priv->txdes_dma), priv->base + FTGMAC100_OFFSET_TXR_BADDR_HIGH); + + /* Configure RX buffer size */ + iowrite32(FTGMAC100_RBSR_SIZE(RX_BUF_SIZE), +@@ -321,6 +346,7 @@ + static void ftgmac100_start_hw(struct ftgmac100 *priv) + { + u32 maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR); ++ struct phy_device *phydev = priv->netdev->phydev; + + /* Keep the original GMAC and FAST bits */ + maccr &= (FTGMAC100_MACCR_FAST_MODE | FTGMAC100_MACCR_GIGA_MODE); +@@ -349,6 +375,10 @@ + if (priv->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) + maccr |= FTGMAC100_MACCR_RM_VLAN; + ++ if (of_device_is_compatible(priv->dev->of_node, "aspeed,ast2700-mac") && ++ phydev && phydev->interface == PHY_INTERFACE_MODE_RMII) ++ maccr |= FTGMAC100_MACCR_RMII_ENABLE; ++ + /* Hit the HW */ + iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR); + } +@@ -425,7 +455,9 @@ + priv->rx_skbs[entry] = skb; + + /* Store DMA address into RX desc */ +- rxdes->rxdes3 = cpu_to_le32(map); ++ rxdes->rxdes2 = cpu_to_le32(FIELD_PREP(FTGMAC100_RXDES2_RXBUF_BADR_HI, ++ upper_32_bits(map))); ++ rxdes->rxdes3 = cpu_to_le32(lower_32_bits(map)); + + /* Ensure the above is ordered vs clearing the OWN bit */ + dma_wmb(); +@@ -551,7 +583,8 @@ + csum_vlan & 0xffff); + + /* Tear down DMA mapping, do necessary cache management */ +- map = le32_to_cpu(rxdes->rxdes3); ++ map = le32_to_cpu(rxdes->rxdes3) | ++ ((le32_to_cpu(rxdes->rxdes2) & FTGMAC100_RXDES2_RXBUF_BADR_HI) << 16); + + #if defined(CONFIG_ARM) && !defined(CONFIG_ARM_DMA_USE_IOMMU) + /* When we don't have an iommu, we can save cycles by not +@@ -628,9 +661,12 @@ + struct ftgmac100_txdes *txdes, + u32 ctl_stat) + { +- dma_addr_t map = le32_to_cpu(txdes->txdes3); ++ dma_addr_t map; + size_t len; + ++ map = le32_to_cpu(txdes->txdes3) | ++ ((le32_to_cpu(txdes->txdes2) & FTGMAC100_TXDES2_TXBUF_BADR_HI) << 16); ++ + if (ctl_stat & FTGMAC100_TXDES0_FTS) { + len = skb_headlen(skb); + dma_unmap_single(priv->dev, map, len, DMA_TO_DEVICE); +@@ -784,7 +820,9 @@ + f_ctl_stat |= FTGMAC100_TXDES0_FTS; + if (nfrags == 0) + f_ctl_stat |= FTGMAC100_TXDES0_LTS; +- txdes->txdes3 = cpu_to_le32(map); ++ txdes->txdes2 = cpu_to_le32(FIELD_PREP(FTGMAC100_TXDES2_TXBUF_BADR_HI, ++ upper_32_bits((ulong)map))); ++ txdes->txdes3 = cpu_to_le32(lower_32_bits(map)); + txdes->txdes1 = cpu_to_le32(csum_vlan); + + /* Next descriptor */ +@@ -812,7 +850,9 @@ + ctl_stat |= FTGMAC100_TXDES0_LTS; + txdes->txdes0 = cpu_to_le32(ctl_stat); + txdes->txdes1 = 0; +- txdes->txdes3 = cpu_to_le32(map); ++ txdes->txdes2 = cpu_to_le32(FIELD_PREP(FTGMAC100_TXDES2_TXBUF_BADR_HI, ++ upper_32_bits((ulong)map))); ++ txdes->txdes3 = cpu_to_le32(lower_32_bits(map)); + + /* Next one */ + pointer = ftgmac100_next_tx_pointer(priv, pointer); +@@ -887,7 +927,10 @@ + for (i = 0; i < priv->rx_q_entries; i++) { + struct ftgmac100_rxdes *rxdes = &priv->rxdes[i]; + struct sk_buff *skb = priv->rx_skbs[i]; +- dma_addr_t map = le32_to_cpu(rxdes->rxdes3); ++ dma_addr_t map; ++ ++ map = le32_to_cpu(rxdes->rxdes3) | ++ ((le32_to_cpu(rxdes->rxdes2) & 
FTGMAC100_RXDES2_RXBUF_BADR_HI) << 16); + + if (!skb) + continue; +@@ -986,7 +1029,9 @@ + for (i = 0; i < priv->rx_q_entries; i++) { + rxdes = &priv->rxdes[i]; + rxdes->rxdes0 = 0; +- rxdes->rxdes3 = cpu_to_le32(priv->rx_scratch_dma); ++ rxdes->rxdes2 = cpu_to_le32(FIELD_PREP(FTGMAC100_RXDES2_RXBUF_BADR_HI, ++ upper_32_bits(priv->rx_scratch_dma))); ++ rxdes->rxdes3 = cpu_to_le32(lower_32_bits(priv->rx_scratch_dma)); + } + /* Mark the end of the ring */ + rxdes->rxdes0 |= cpu_to_le32(priv->rxdes0_edorr_mask); +@@ -1730,16 +1775,21 @@ + static void ftgmac100_phy_disconnect(struct net_device *netdev) + { + struct ftgmac100 *priv = netdev_priv(netdev); ++ struct phy_device *phydev = netdev->phydev; + +- if (!netdev->phydev) +- return; ++ if (priv->sgmii) { ++ phy_exit(priv->sgmii); ++ devm_phy_put(priv->dev, priv->sgmii); ++ } + +- phy_disconnect(netdev->phydev); +- if (of_phy_is_fixed_link(priv->dev->of_node)) +- of_phy_deregister_fixed_link(priv->dev->of_node); ++ if (phydev) { ++ phy_disconnect(phydev); ++ if (of_phy_is_fixed_link(priv->dev->of_node)) ++ of_phy_deregister_fixed_link(priv->dev->of_node); + +- if (priv->use_ncsi) +- fixed_phy_unregister(netdev->phydev); ++ if (priv->use_ncsi) ++ fixed_phy_unregister(phydev); ++ } + } + + static void ftgmac100_destroy_mdio(struct net_device *netdev) +@@ -1882,7 +1932,8 @@ + np = pdev->dev.of_node; + if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac") || + of_device_is_compatible(np, "aspeed,ast2500-mac") || +- of_device_is_compatible(np, "aspeed,ast2600-mac"))) { ++ of_device_is_compatible(np, "aspeed,ast2600-mac") || ++ of_device_is_compatible(np, "aspeed,ast2700-mac"))) { + priv->rxdes0_edorr_mask = BIT(30); + priv->txdes0_edotr_mask = BIT(30); + priv->is_aspeed = true; +@@ -1913,40 +1964,22 @@ + goto err_phy_connect; + } + err = phy_connect_direct(netdev, phydev, ftgmac100_adjust_link, +- PHY_INTERFACE_MODE_MII); ++ PHY_INTERFACE_MODE_RMII); + if (err) { + dev_err(&pdev->dev, "Connecting PHY failed\n"); + goto err_phy_connect; + } +- } else if (np && of_phy_is_fixed_link(np)) { +- struct phy_device *phy; +- +- err = of_phy_register_fixed_link(np); +- if (err) { +- dev_err(&pdev->dev, "Failed to register fixed PHY\n"); +- goto err_phy_connect; +- } +- +- phy = of_phy_get_and_connect(priv->netdev, np, +- &ftgmac100_adjust_link); +- if (!phy) { +- dev_err(&pdev->dev, "Failed to connect to fixed PHY\n"); +- of_phy_deregister_fixed_link(np); +- err = -EINVAL; +- goto err_phy_connect; +- } +- +- /* Display what we found */ +- phy_attached_info(phy); +- } else if (np && of_get_property(np, "phy-handle", NULL)) { ++ } else if (np && (of_phy_is_fixed_link(np) || ++ of_get_property(np, "phy-handle", NULL))) { + struct phy_device *phy; + + /* Support "mdio"/"phy" child nodes for ast2400/2500 with + * an embedded MDIO controller. Automatically scan the DTS for + * available PHYs and register them. 
+ */ +- if (of_device_is_compatible(np, "aspeed,ast2400-mac") || +- of_device_is_compatible(np, "aspeed,ast2500-mac")) { ++ if (of_get_property(np, "phy-handle", NULL) && ++ (of_device_is_compatible(np, "aspeed,ast2400-mac") || ++ of_device_is_compatible(np, "aspeed,ast2500-mac"))) { + err = ftgmac100_setup_mdio(netdev); + if (err) + goto err_setup_mdio; +@@ -1995,6 +2028,54 @@ + if (of_device_is_compatible(np, "aspeed,ast2600-mac")) + iowrite32(FTGMAC100_TM_DEFAULT, + priv->base + FTGMAC100_OFFSET_TM); ++ ++ if (of_device_is_compatible(np, "aspeed,ast2700-mac")) { ++ if (netdev->phydev->interface == PHY_INTERFACE_MODE_SGMII) { ++ priv->sgmii = devm_phy_optional_get(&pdev->dev, "sgmii"); ++ if (IS_ERR(priv->sgmii)) { ++ dev_err(priv->dev, "Failed to get sgmii phy (%ld)\n", ++ PTR_ERR(priv->sgmii)); ++ err = PTR_ERR(priv->sgmii); ++ goto err_register_netdev; ++ } ++ } ++ } ++ } ++ ++ priv->rst = devm_reset_control_get_optional_exclusive(priv->dev, NULL); ++ if (IS_ERR(priv->rst)) { ++ err = PTR_ERR(priv->rst); ++ goto err_register_netdev; ++ } ++ ++ err = reset_control_assert(priv->rst); ++ if (err) { ++ dev_err(priv->dev, "Failed to reset mac (%d)\n", err); ++ goto err_register_netdev; ++ } ++ usleep_range(10000, 20000); ++ err = reset_control_deassert(priv->rst); ++ if (err) { ++ dev_err(priv->dev, "Failed to deassert mac reset (%d)\n", err); ++ goto err_register_netdev; ++ } ++ ++ if (priv->sgmii) { ++ /* If using fixed link in dts, sgmii need to be forced */ ++ if (of_phy_is_fixed_link(np)) { ++ err = phy_set_speed(priv->sgmii, netdev->phydev->speed); ++ if (err) { ++ dev_err(priv->dev, "Failed to force sgmii speed\n"); ++ goto err_register_netdev; ++ } ++ } else { ++ /* The phy_init is used to configure Nway */ ++ err = phy_init(priv->sgmii); ++ if (err) { ++ dev_err(priv->dev, "Failed to configure sgmii Nway\n"); ++ goto err_register_netdev; ++ } ++ } + } + + /* Default ring sizes */ +@@ -2021,6 +2102,12 @@ + netdev->hw_features &= ~(NETIF_F_HW_CSUM | NETIF_F_RXCSUM); + netdev->features |= netdev->hw_features; + ++ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); ++ if (err) { ++ dev_err(&pdev->dev, "64-bit DMA enable failed\n"); ++ goto err_register_netdev; ++ } ++ + /* register network device */ + err = register_netdev(netdev); + if (err) { +diff --git a/drivers/net/ethernet/faraday/ftgmac100.h b/drivers/net/ethernet/faraday/ftgmac100.h +--- a/drivers/net/ethernet/faraday/ftgmac100.h 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/net/ethernet/faraday/ftgmac100.h 2025-12-23 10:16:21.145032318 +0000 +@@ -57,6 +57,13 @@ + #define FTGMAC100_OFFSET_RX_RUNT 0xc0 + #define FTGMAC100_OFFSET_RX_CRCER_FTL 0xc4 + #define FTGMAC100_OFFSET_RX_COL_LOST 0xc8 ++/* reserved 0xcc - 0x174 */ ++#define FTGMAC100_OFFSET_TXR_BADDR_LOW 0x178 /* ast2700 */ ++#define FTGMAC100_OFFSET_TXR_BADDR_HIGH 0x17c /* ast2700 */ ++#define FTGMAC100_OFFSET_HPTXR_BADDR_LOW 0x180 /* ast2700 */ ++#define FTGMAC100_OFFSET_HPTXR_BADDR_HIGH 0x184 /* ast2700 */ ++#define FTGMAC100_OFFSET_RXR_BADDR_LOW 0x188 /* ast2700 */ ++#define FTGMAC100_OFFSET_RXR_BADDR_HIGH 0x18C /* ast2700 */ + + /* + * Interrupt status register & interrupt enable register +@@ -166,6 +173,7 @@ + #define FTGMAC100_MACCR_RX_MULTIPKT (1 << 16) + #define FTGMAC100_MACCR_RX_BROADPKT (1 << 17) + #define FTGMAC100_MACCR_DISCARD_CRCERR (1 << 18) ++#define FTGMAC100_MACCR_RMII_ENABLE BIT(20) /* defined in ast2700 */ + #define FTGMAC100_MACCR_FAST_MODE (1 << 19) + #define FTGMAC100_MACCR_SW_RST (1 << 31) + +@@ -225,6 +233,7 @@ + #define 
FTGMAC100_TXDES1_TX2FIC (1 << 30) + #define FTGMAC100_TXDES1_TXIC (1 << 31) + ++#define FTGMAC100_TXDES2_TXBUF_BADR_HI GENMASK(18, 16) + /* + * Receive descriptor, aligned to 16 bytes + */ +@@ -271,4 +280,5 @@ + #define FTGMAC100_RXDES1_UDP_CHKSUM_ERR (1 << 26) + #define FTGMAC100_RXDES1_IP_CHKSUM_ERR (1 << 27) + ++#define FTGMAC100_RXDES2_RXBUF_BADR_HI GENMASK(18, 16) + #endif /* __FTGMAC100_H */ +diff --git a/drivers/net/mctp/mctp-pcie-vdm.c b/drivers/net/mctp/mctp-pcie-vdm.c +--- a/drivers/net/mctp/mctp-pcie-vdm.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/net/mctp/mctp-pcie-vdm.c 2025-12-23 10:16:21.981018306 +0000 +@@ -0,0 +1,363 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * mctp-pcie-vdm.c - MCTP-over-PCIe-VDM (DMTF DSP0238) transport binding driver. ++ * ++ * DSP0238 is available at: ++ * https://www.dmtf.org/sites/default/files/standards/documents/DSP0238_1.2.0.pdf ++ * ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define MCTP_PCIE_VDM_MIN_MTU 64 ++#define MCTP_PCIE_VDM_MAX_MTU 512 ++/* 16byte */ ++#define MCTP_PCIE_VDM_HDR_SIZE 16 ++#define MCTP_PAYLOAD_IC_TYPE_SIZE 1 ++#define MCTP_RECEIVE_PKT_TIMEOUT_MS 5 ++ ++#define MCTP_PCIE_VDM_NET_DEV_TX_QUEUE_LEN 1100 ++#define MCTP_PCIE_VDM_DEV_TX_QUEUE_SIZE 64 ++ ++#define MCTP_PCIE_VDM_FMT_4DW 0x3 ++#define MCTP_PCIE_VDM_TYPE_MSG 0x10 ++#define MCTP_PCIE_VDM_CODE 0x0 ++/* PCIe VDM message code */ ++#define MCTP_PCIE_VDM_MSG_CODE 0x7F ++#define MCTP_PCIE_VDM_VENDOR_ID 0x1AB4 ++/* MCTP message type */ ++#define MCTP_MSG_TYPE_MASK GENMASK(6, 0) ++#define MCTP_PCIE_VDM_MSG_TYPE 0x7E ++ ++#define MCTP_PCIE_SWAP_NET_ENDIAN(arr, len) \ ++ do { \ ++ u32 *p = (u32 *)(arr); \ ++ for (int i = 0; i < (len); i++) { \ ++ p[i] = htonl(p[i]); \ ++ } \ ++ } while (0) ++ ++#define MCTP_PCIE_SWAP_LITTLE_ENDIAN(arr, len) \ ++ do { \ ++ u32 *p = (u32 *)(arr); \ ++ for (int i = 0; i < (len); i++) { \ ++ p[i] = ntohl(p[i]); \ ++ p[i] = cpu_to_le32(p[i]); \ ++ } \ ++ } while (0) ++ ++enum mctp_pcie_vdm_route_type { ++ MCTP_PCIE_VDM_ROUTE_TO_RC = 0, ++ MCTP_PCIE_VDM_ROUTE_BY_ID = 2, ++ MCTP_PCIE_VDM_BROADCAST_FROM_RC = 3, ++}; ++ ++enum mctp_ctrl_command_code { ++ MCTP_CTRL_CMD_SET_ENDPOINT_ID = 0x01, ++ MCTP_CTRL_CMD_GET_ENDPOINT_ID = 0x02, ++ MCTP_CTRL_CMD_PREPARE_ENDPOINT_DISCOVERY = 0x0B, ++ MCTP_CTRL_CMD_ENDPOINT_DISCOVERY = 0x0C, ++ MCTP_CTRL_CMD_DISCOVERY_NOTIFY = 0x0D ++}; ++ ++struct mctp_pcie_vdm_hdr { ++ u32 length : 10, rsvd0 : 2, attr : 2, ep : 1, td : 1, rsvd1 : 4, tc : 3, ++ rsvd2 : 1, route_type : 5, fmt : 2, rsvd3 : 1; ++ u8 msg_code; ++ u8 tag_vdm_code : 4, tag_pad_len : 2, tag_rsvd : 2; ++ u16 pci_req_id; ++ u16 pci_vendor_id; ++ u16 pci_target_id; ++}; ++ ++struct mctp_pcie_vdm_dev { ++ struct device *dev; ++ const struct mctp_pcie_vdm_ops *callback_ops; ++}; ++ ++static const struct mctp_pcie_vdm_hdr mctp_pcie_vdm_hdr_template = { ++ .fmt = MCTP_PCIE_VDM_FMT_4DW, ++ .route_type = MCTP_PCIE_VDM_TYPE_MSG | MCTP_PCIE_VDM_ROUTE_BY_ID, ++ .tag_vdm_code = MCTP_PCIE_VDM_CODE, ++ .msg_code = MCTP_PCIE_VDM_MSG_CODE, ++ .pci_vendor_id = MCTP_PCIE_VDM_VENDOR_ID, ++ .attr = 0, ++}; ++ ++static void mctp_pcie_vdm_display_skb_buff_data(struct sk_buff *skb) ++{ ++ int i = 0; ++ ++ while ((i + 4) < skb->len) { ++ pr_debug("%02x %02x %02x %02x\n", skb->data[i], ++ skb->data[i + 1], skb->data[i + 
2], skb->data[i + 3]); ++ i += 4; ++ } ++ ++ char buf[16] = { 0 }; ++ char *p = buf; ++ ++ while (i < skb->len) { ++ p += snprintf(p, sizeof(buf) - (p - buf), "%02x ", ++ skb->data[i]); ++ i++; ++ } ++ pr_debug("%s\n", buf); ++} ++ ++static int mctp_pcie_vdm_xmit(struct net_device *ndev, struct sk_buff *skb) ++{ ++ struct net_device_stats *stats; ++ struct mctp_pcie_vdm_hdr *hdr; ++ struct mctp_pcie_vdm_dev *vdm_dev; ++ u8 *hdr_byte; ++ u16 payload_len_dw; ++ u16 payload_len_byte; ++ int rc; ++ ++ stats = &ndev->stats; ++ vdm_dev = netdev_priv(ndev); ++ hdr = (struct mctp_pcie_vdm_hdr *)skb->data; ++ hdr_byte = skb->data; ++ payload_len_dw = (ALIGN(skb->len, sizeof(u32)) - MCTP_PCIE_VDM_HDR_SIZE) / sizeof(u32); ++ payload_len_byte = skb->len - MCTP_PCIE_VDM_HDR_SIZE; ++ ++ hdr->length = payload_len_dw; ++ hdr->tag_pad_len = ++ ALIGN(payload_len_byte, sizeof(u32)) - payload_len_byte; ++ pr_debug("%s: skb len %d pad len %d\n", __func__, skb->len, ++ hdr->tag_pad_len); ++ MCTP_PCIE_SWAP_NET_ENDIAN((u32 *)hdr, ++ sizeof(struct mctp_pcie_vdm_hdr) / sizeof(u32)); ++ ++ mctp_pcie_vdm_display_skb_buff_data(skb); ++ rc = vdm_dev->callback_ops->send_packet(vdm_dev->dev, skb->data, payload_len_dw * sizeof(u32)); ++ ++ if (rc) { ++ pr_err("%s: failed to send packet, rc %d\n", __func__, rc); ++ stats->tx_errors++; ++ } else { ++ stats->tx_packets++; ++ stats->tx_bytes += (skb->len - sizeof(struct mctp_pcie_vdm_hdr)); ++ } ++ return rc; ++} ++ ++static netdev_tx_t mctp_pcie_vdm_start_xmit(struct sk_buff *skb, ++ struct net_device *ndev) ++{ ++ int rc; ++ netdev_tx_t ret; ++ ++ pr_debug("%s: skb len %u\n", __func__, skb->len); ++ ++ if (skb) { ++ rc = mctp_pcie_vdm_xmit(ndev, skb); ++ if (rc) { ++ pr_err("%s: failed to send packet, rc %d\n", __func__, rc); ++ ret = NETDEV_TX_BUSY; ++ } else { ++ ret = NETDEV_TX_OK; ++ kfree_skb(skb); ++ } ++ } ++ return ret; ++} ++ ++static void mctp_pcie_vdm_uninit(struct net_device *ndev) ++{ ++ struct mctp_pcie_vdm_dev *vdm_dev; ++ ++ vdm_dev = netdev_priv(ndev); ++ pr_info("%s: uninitializing vdm_dev %s\n", __func__, ++ ndev->name); ++ vdm_dev->callback_ops->uninit(vdm_dev->dev); ++} ++ ++static int mctp_pcie_vdm_hdr_create(struct sk_buff *skb, ++ struct net_device *ndev, ++ unsigned short type, const void *daddr, ++ const void *saddr, unsigned int len) ++{ ++ u8 dest_addr[3] = {0}; ++ struct mctp_pcie_vdm_hdr *hdr = ++ (struct mctp_pcie_vdm_hdr *)skb_push(skb, sizeof(*hdr)); ++ ++ pr_debug("%s type %d len %d\n", __func__, type, len); ++ memcpy(hdr, &mctp_pcie_vdm_hdr_template, sizeof(*hdr)); ++ if (daddr) { ++ memcpy(dest_addr, (u8 *)daddr, sizeof(dest_addr)); ++ hdr->route_type |= dest_addr[0] & GENMASK(2, 0); ++ hdr->pci_target_id = dest_addr[1] << 8 | dest_addr[2]; ++ pr_debug("%s dst route %d addr %d\n", __func__, hdr->route_type, hdr->pci_target_id); ++ } ++ ++ if (saddr) { ++ pr_debug("%s src addr %d\n", __func__, *(u16 *)saddr); ++ hdr->pci_req_id = *(u16 *)saddr; ++ } ++ ++ return 0; ++} ++ ++static const struct net_device_ops mctp_pcie_vdm_net_ops = { ++ .ndo_start_xmit = mctp_pcie_vdm_start_xmit, ++ .ndo_uninit = mctp_pcie_vdm_uninit, ++}; ++ ++static const struct header_ops mctp_pcie_vdm_net_hdr_ops = { ++ .create = mctp_pcie_vdm_hdr_create, ++}; ++ ++static void mctp_pcie_vdm_net_setup(struct net_device *ndev) ++{ ++ ndev->type = ARPHRD_MCTP; ++ ++ ndev->mtu = MCTP_PCIE_VDM_MIN_MTU; ++ ndev->min_mtu = MCTP_PCIE_VDM_MIN_MTU; ++ ndev->max_mtu = MCTP_PCIE_VDM_MAX_MTU; ++ ndev->tx_queue_len = MCTP_PCIE_VDM_NET_DEV_TX_QUEUE_LEN; ++ ndev->addr_len = 3; //PCIe 
bdf is 2bytes + 1byte route type ++ ndev->hard_header_len = sizeof(struct mctp_pcie_vdm_hdr); ++ ++ ndev->netdev_ops = &mctp_pcie_vdm_net_ops; ++ ndev->header_ops = &mctp_pcie_vdm_net_hdr_ops; ++} ++ ++static int mctp_pcie_vdm_add_net_dev(struct net_device **dev) ++{ ++ struct net_device *ndev = alloc_netdev(sizeof(struct mctp_pcie_vdm_dev), ++ "mctppci%d", NET_NAME_UNKNOWN, ++ mctp_pcie_vdm_net_setup); ++ ++ if (!ndev) { ++ pr_err("%s: failed to allocate net device\n", __func__); ++ return -ENOMEM; ++ } ++ dev_net_set(ndev, current->nsproxy->net_ns); ++ ++ *dev = ndev; ++ int rc; ++ ++ rc = mctp_register_netdev(ndev, NULL, MCTP_PHYS_BINDING_PCIE_VDM); ++ if (rc) { ++ pr_err("%s: failed to register net device\n", __func__); ++ free_netdev(ndev); ++ return rc; ++ } ++ return rc; ++} ++ ++void mctp_pcie_vdm_receive_packet(struct net_device *ndev) ++{ ++ struct mctp_pcie_vdm_dev *vdm_dev; ++ u8 *packet; ++ ++ vdm_dev = netdev_priv(ndev); ++ packet = vdm_dev->callback_ops->recv_packet(vdm_dev->dev); ++ ++ while (!IS_ERR(packet)) { ++ MCTP_PCIE_SWAP_LITTLE_ENDIAN((u32 *)packet, ++ sizeof(struct mctp_pcie_vdm_hdr) / sizeof(u32)); ++ struct mctp_pcie_vdm_hdr *vdm_hdr = (struct mctp_pcie_vdm_hdr *)packet; ++ struct mctp_skb_cb *cb; ++ struct net_device_stats *stats; ++ struct sk_buff *skb; ++ u16 len; ++ int net_status; ++ ++ stats = &ndev->stats; ++ len = vdm_hdr->length * sizeof(u32) - ++ vdm_hdr->tag_pad_len; ++ len += (MCTP_PCIE_VDM_HDR_SIZE - sizeof(struct mctp_pcie_vdm_hdr)); ++ skb = netdev_alloc_skb(ndev, len); ++ pr_debug("%s: received packet size: %d\n", __func__, ++ len); ++ ++ if (!skb) { ++ stats->rx_errors++; ++ pr_err("%s: failed to alloc skb\n", __func__); ++ continue; ++ } ++ ++ skb->protocol = htons(ETH_P_MCTP); ++ /* put data into tail sk buff */ ++ skb_put_data(skb, &packet[sizeof(struct mctp_pcie_vdm_hdr)], len); ++ mctp_pcie_vdm_display_skb_buff_data(skb); ++ ++ cb = __mctp_cb(skb); ++ cb->halen = 3; // route type | bdf address ++ cb->haddr[0] = vdm_hdr->route_type; ++ // address is also converted to little-endian, but we want to keep it as big-endian ++ // because kernel network layer assumes address in big-endian format ++ cb->haddr[1] = vdm_hdr->pci_req_id & 0xFF; ++ cb->haddr[2] = vdm_hdr->pci_req_id >> 8; ++ ++ net_status = netif_rx(skb); ++ if (net_status == NET_RX_SUCCESS) { ++ stats->rx_packets++; ++ stats->rx_bytes += len; ++ } else { ++ stats->rx_dropped++; ++ } ++ ++ vdm_dev->callback_ops->free_packet(packet); ++ packet = vdm_dev->callback_ops->recv_packet(vdm_dev->dev); ++ } ++} ++ ++struct net_device *mctp_pcie_vdm_add_dev(struct device *dev, ++ const struct mctp_pcie_vdm_ops *ops) ++{ ++ struct net_device *ndev; ++ struct mctp_pcie_vdm_dev *vdm_dev; ++ int rc; ++ ++ rc = mctp_pcie_vdm_add_net_dev(&ndev); ++ if (rc) { ++ pr_err("%s: failed to add net device\n", __func__); ++ return ERR_PTR(rc); ++ } ++ ++ vdm_dev = netdev_priv(ndev); ++ vdm_dev->dev = dev; ++ vdm_dev->callback_ops = ops; ++ ++ return ndev; ++} ++EXPORT_SYMBOL_GPL(mctp_pcie_vdm_add_dev); ++ ++void mctp_pcie_vdm_remove_dev(struct net_device *vdm_dev) ++{ ++ pr_debug("%s: removing vdm_dev %s\n", __func__, vdm_dev->name); ++ ++ if (vdm_dev) { ++ mctp_unregister_netdev(vdm_dev); ++ free_netdev(vdm_dev); ++ } ++} ++EXPORT_SYMBOL_GPL(mctp_pcie_vdm_remove_dev); +diff --git a/drivers/net/mdio/mdio-aspeed.c b/drivers/net/mdio/mdio-aspeed.c +--- a/drivers/net/mdio/mdio-aspeed.c 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/net/mdio/mdio-aspeed.c 2025-12-23 10:16:20.939035770 +0000 +@@ -62,6 +62,8 @@ + 
| FIELD_PREP(ASPEED_MDIO_DATA_MIIRDATA, data); + + iowrite32(ctrl, ctx->base + ASPEED_MDIO_CTRL); ++ /* Add dummy read to ensure triggering mdio controller */ ++ (void)ioread32(ctx->base + ASPEED_MDIO_CTRL); + + return readl_poll_timeout(ctx->base + ASPEED_MDIO_CTRL, ctrl, + !(ctrl & ASPEED_MDIO_CTRL_FIRE), +diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig +--- a/drivers/pci/controller/Kconfig 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/pci/controller/Kconfig 2025-12-23 10:16:09.638225214 +0000 +@@ -47,6 +47,15 @@ + + If unsure, say Y if you have an Apple Silicon system. + ++config PCIE_ASPEED ++ bool "ASPEED PCIe controller" ++ depends on PCI ++ depends on OF || COMPILE_TEST ++ select PCI_MSI_ARCH_FALLBACKS ++ help ++ Say Y here if you want PCIe controller support on ++ ASPEED SoCs. ++ + config PCI_VERSATILE + bool "ARM Versatile PB PCI controller" + depends on ARCH_VERSATILE || COMPILE_TEST +diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile +--- a/drivers/pci/controller/Makefile 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/pci/controller/Makefile 2025-12-23 10:16:13.816155152 +0000 +@@ -39,6 +39,7 @@ + obj-$(CONFIG_PCIE_HISI_ERR) += pcie-hisi-error.o + obj-$(CONFIG_PCIE_APPLE) += pcie-apple.o + obj-$(CONFIG_PCIE_MT7621) += pcie-mt7621.o ++obj-$(CONFIG_PCIE_ASPEED) += pcie-aspeed.o + + # pcie-hisi.o quirks are needed even without CONFIG_PCIE_DW + obj-y += dwc/ +diff --git a/drivers/pci/controller/pcie-aspeed.c b/drivers/pci/controller/pcie-aspeed.c +--- a/drivers/pci/controller/pcie-aspeed.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/pci/controller/pcie-aspeed.c 2025-12-23 10:16:21.109032921 +0000 +@@ -0,0 +1,1185 @@ ++// SPDX-License-Identifier: GPL-2.0+ ++/* ++ * PCIe host controller driver for ASPEED PCIe Bridge ++ * ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "../pci.h" ++ ++#define MAX_MSI_HOST_IRQS 64 ++ ++/* AST2600 AHBC Registers */ ++#define AHBC_KEY 0x00 ++#define AHBC_UNLOCK 0xAEED1A03 ++#define AHBC_ADDR_MAPPING 0x8C ++#define PCIE_RC_MEMORY_EN BIT(5) ++ ++/* AST2600 PCIe Host Controller Registers */ ++#define PEHR_MISC_10 0x10 ++#define DATALINK_REPORT_CAPABLE BIT(4) ++#define PEHR_MISC_14 0x14 ++#define HOTPLUG_CAPABLE_ENABLE BIT(6) ++#define HOTPLUG_SURPRISE_ENABLE BIT(5) ++#define ATTENTION_BUTTON_ENABLE BIT(0) ++#define PEHR_GLOBAL 0x30 ++#define RC_SYNC_RESET_DISABLE BIT(20) ++#define PCIE_RC_SLOT_ENABLE BIT(1) ++#define ROOT_COMPLEX_ID(x) ((x) << 4) ++#define PEHR_LOCK 0x7C ++#define PCIE_UNLOCK 0xa8 ++#define PEHR_LINK 0xC0 ++#define PCIE_LINK_STS BIT(5) ++ ++/* AST2600 H2X Controller Registers */ ++/* Common Registers*/ ++#define H2X_INT_STS 0x08 ++#define PCIE_TX_IDLE_CLEAR BIT(0) ++#define H2X_TX_DESC0 0x10 ++#define H2X_TX_DESC1 0x14 ++#define H2X_TX_DESC2 0x18 ++#define H2X_TX_DESC3 0x1C ++#define H2X_TX_DESC_DATA 0x20 ++#define H2X_STS 0x24 ++#define PCIE_TX_IDLE BIT(31) ++#define PCIE_STATUS_OF_TX GENMASK(25, 24) ++#define PCIE_RC_L_TX_COMPLETE BIT(24) ++#define PCIE_RC_H_TX_COMPLETE BIT(25) ++#define PCIE_TRIGGER_TX BIT(0) ++#define H2X_AHB_ADDR_CONFIG0 0x60 ++#define H2X_AHB_ADDR_CONFIG1 0x64 ++#define H2X_AHB_ADDR_CONFIG2 0x68 ++/* Device Registers */ ++#define H2X_DEV_CTRL 0x00 ++#define PCIE_RX_DMA_EN BIT(9) ++#define PCIE_RX_LINEAR BIT(8) ++#define PCIE_RX_MSI_SEL BIT(7) ++#define 
PCIE_RX_MSI_EN BIT(6) ++#define PCIE_UNLOCK_RX_BUFF BIT(4) ++#define PCIE_Wait_RX_TLP_CLR BIT(2) ++#define PCIE_RC_RX_ENABLE BIT(1) ++#define PCIE_RC_ENABLE BIT(0) ++#define H2X_DEV_STS 0x08 ++#define PCIE_RC_RX_DONE_ISR BIT(4) ++#define H2X_DEV_RX_DESC_DATA 0x0C ++#define H2X_DEV_RX_DESC1 0x14 ++#define H2X_DEV_TX_TAG 0x3C ++ ++/* AST2700 H2X */ ++#define H2X_CTRL 0x00 ++#define H2X_BRIDGE_EN BIT(0) ++#define H2X_BRIDGE_DIRECT_EN BIT(1) ++#define H2X_CFGE_INT_STS 0x08 ++#define CFGE_TX_IDLE BIT(0) ++#define CFGE_RX_BUSY BIT(1) ++#define H2X_CFGI_TLP 0x20 ++#define H2X_CFGI_WR_DATA 0x24 ++#define H2X_CFGI_CTRL 0x28 ++#define CFGI_TLP_FIRE BIT(0) ++#define H2X_CFGI_RET_DATA 0x2C ++#define H2X_CFGE_TLP_1ST 0x30 ++#define H2X_CFGE_TLP_NEXT 0x34 ++#define H2X_CFGE_CTRL 0x38 ++#define CFGE_TLP_FIRE BIT(0) ++#define H2X_CFGE_RET_DATA 0x3C ++#define H2X_REMAP_PREF_ADDR 0x70 ++#define H2X_REMAP_DIRECT_ADDR 0x78 ++ ++/* AST2700 PEHR */ ++#define PEHR_VID_DID 0x00 ++#define PEHR_MISC_44 0x44 ++#define ENABLE_SLOT_CAP BIT(12) ++#define PEHR_MISC_38 0x38 ++#define DATALINK_REPORT_CAP BIT(20) ++#define PEHR_MISC_3C 0x3C ++#define PEHR_MISC_58 0x58 ++#define LOCAL_SCALE_SUP BIT(0) ++#define PEHR_MISC_5C 0x5C ++#define PEHR_MISC_60 0x60 ++#define PORT_TPYE GENMASK(7, 4) ++#define PORT_TYPE_ROOT BIT(2) ++#define PEHR_MISC_70 0x70 ++#define PEHR_MISC_78 0x78 ++#define PEHR_MISC_1B8 0x1B8 ++#define SW_ATT_BTN BIT(0) ++#define PEHR_MISC_344 0x344 ++#define LINK_STATUS_GEN2 BIT(18) ++#define PEHR_MISC_358 0x358 ++#define LINK_STATUS_GEN4 BIT(8) ++ ++/* AST2700 SCU */ ++#define SCU_60 0x60 ++#define RC_E2M_PATH_EN BIT(0) ++#define RC_H2XS_PATH_EN BIT(16) ++#define RC_H2XD_PATH_EN BIT(17) ++#define RC_H2XX_PATH_EN BIT(18) ++#define RC_UPSTREAM_MEM_EN BIT(19) ++#define SCU_64 0x64 ++#define SCU_70 0x70 ++#define SCU_78 0x78 ++ ++/* TLP configuration type 0 and type 1 */ ++#define CRG_READ_FMTTYPE(type) (0x04000000 | (type << 24)) ++#define CRG_WRITE_FMTTYPE(type) (0x44000000 | (type << 24)) ++#define CRG_PAYLOAD_SIZE 0x01 /* 1 DWORD */ ++#define TLP_COMP_STATUS(s) (((s) >> 13) & 7) ++ ++struct aspeed_pcie_rc_platform { ++ int (*setup)(struct platform_device *pdev); ++ /* Interrupt Register Offset */ ++ int reg_intx_en; ++ int reg_intx_sts; ++ int reg_msi_en; ++ int reg_msi_sts; ++ int msi_address; ++}; ++ ++struct aspeed_pcie { ++ struct pci_host_bridge *host; ++ struct device *dev; ++ void __iomem *reg; ++ struct regmap *ahbc; ++ struct regmap *cfg; ++ struct regmap *pciephy; ++ struct clk *clock; ++ const struct aspeed_pcie_rc_platform *platform; ++ ++ int domain; ++ u8 tx_tag; ++ int host_bus_num; ++ ++ struct reset_control *h2xrst; ++ struct reset_control *perst; ++ ++ struct irq_domain *irq_domain; ++ struct irq_domain *dev_domain; ++ struct irq_domain *msi_domain; ++ /* Protects MSI IRQ allocation and release */ ++ struct mutex lock; ++ ++ int hotplug_event; ++ struct gpio_desc *perst_ep_in; ++ struct gpio_desc *perst_rc_out; ++ struct gpio_desc *perst_owner; ++ struct delayed_work rst_dwork; ++ DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_HOST_IRQS); ++}; ++ ++static void aspeed_pcie_intx_ack_irq(struct irq_data *d) ++{ ++ struct aspeed_pcie *pcie = irq_data_get_irq_chip_data(d); ++ int intx_en = pcie->platform->reg_intx_en; ++ ++ writel(readl(pcie->reg + intx_en) | BIT(d->hwirq), pcie->reg + intx_en); ++} ++ ++static void aspeed_pcie_intx_mask_irq(struct irq_data *d) ++{ ++ struct aspeed_pcie *pcie = irq_data_get_irq_chip_data(d); ++ int intx_en = pcie->platform->reg_intx_en; ++ ++ writel(readl(pcie->reg + 
intx_en) & ~BIT(d->hwirq), pcie->reg + intx_en); ++ } ++ ++ static void aspeed_pcie_intx_unmask_irq(struct irq_data *d) ++ { ++ struct aspeed_pcie *pcie = irq_data_get_irq_chip_data(d); ++ int intx_en = pcie->platform->reg_intx_en; ++ ++ writel(readl(pcie->reg + intx_en) | BIT(d->hwirq), pcie->reg + intx_en); ++ } ++ ++ static struct irq_chip aspeed_intx_irq_chip = { ++ .name = "ASPEED:IntX", ++ .irq_ack = aspeed_pcie_intx_ack_irq, ++ .irq_mask = aspeed_pcie_intx_mask_irq, ++ .irq_unmask = aspeed_pcie_intx_unmask_irq, ++ }; ++ ++ static int aspeed_pcie_intx_map(struct irq_domain *domain, unsigned int irq, ++ irq_hw_number_t hwirq) ++ { ++ irq_set_chip_and_handler(irq, &aspeed_intx_irq_chip, handle_level_irq); ++ irq_set_chip_data(irq, domain->host_data); ++ irq_set_status_flags(irq, IRQ_LEVEL); ++ ++ return 0; ++ } ++ ++ static const struct irq_domain_ops aspeed_intx_domain_ops = { ++ .map = aspeed_pcie_intx_map, ++ }; ++ ++ static irqreturn_t aspeed_pcie_intr_handler(int irq, void *dev_id) ++ { ++ struct aspeed_pcie *pcie = dev_id; ++ const struct aspeed_pcie_rc_platform *platform = pcie->platform; ++ unsigned long status; ++ unsigned long intx; ++ u32 bit; ++ int i; ++ ++ intx = readl(pcie->reg + platform->reg_intx_sts) & 0xf; ++ if (intx) { ++ for_each_set_bit(bit, &intx, PCI_NUM_INTX) ++ generic_handle_domain_irq(pcie->irq_domain, bit); ++ } ++ ++ if (IS_ENABLED(CONFIG_PCI_MSI)) { ++ for (i = 0; i < 2; i++) { ++ status = readl(pcie->reg + platform->reg_msi_sts + (i * 4)); ++ writel(status, pcie->reg + platform->reg_msi_sts + (i * 4)); ++ /* Workaround: AST2700 MSI needs to clear status twice */ ++ if (of_device_is_compatible(pcie->dev->of_node, "aspeed,ast2700-pcie")) ++ writel(status, pcie->reg + platform->reg_msi_sts + (i * 4)); ++ if (!status) ++ continue; ++ ++ for_each_set_bit(bit, &status, 32) { ++ if (i) ++ bit += 32; ++ generic_handle_domain_irq(pcie->dev_domain, bit); ++ } ++ } ++ } ++ ++ return IRQ_HANDLED; ++ } ++ ++ static int aspeed_ast2600_rd_conf(struct pci_bus *bus, unsigned int devfn, ++ int where, int size, u32 *val) ++ { ++ struct aspeed_pcie *pcie = bus->sysdata; ++ u32 bdf_offset; ++ int rx_done_fail = 0, slot = PCI_SLOT(devfn); ++ u32 cfg_val, isr, type = 0; ++ u32 link_sts = 0; ++ int ret; ++ ++ /* Unlock the RX buffer before triggering the next TX config */ ++ writel(PCIE_UNLOCK_RX_BUFF | readl(pcie->reg + H2X_DEV_CTRL), ++ pcie->reg + H2X_DEV_CTRL); ++ ++ if (bus->number == pcie->host_bus_num && slot != 0 && slot != 8) ++ return PCIBIOS_DEVICE_NOT_FOUND; ++ type = (bus->number > pcie->host_bus_num); ++ ++ if (type) { ++ regmap_read(pcie->pciephy, PEHR_LINK, &link_sts); ++ if (!(link_sts & PCIE_LINK_STS)) ++ return PCIBIOS_DEVICE_NOT_FOUND; ++ } ++ ++ bdf_offset = ((bus->number) << 24) | (PCI_SLOT(devfn) << 19) | ++ (PCI_FUNC(devfn) << 16) | (where & ~3); ++ ++ pcie->tx_tag %= 0x7; ++ ++ regmap_write(pcie->cfg, H2X_TX_DESC0, 0x04000001 | (type << 24)); ++ regmap_write(pcie->cfg, H2X_TX_DESC1, 0x0000200f | (pcie->tx_tag << 8)); ++ regmap_write(pcie->cfg, H2X_TX_DESC2, bdf_offset); ++ regmap_write(pcie->cfg, H2X_TX_DESC3, 0x00000000); ++ ++ regmap_write_bits(pcie->cfg, H2X_STS, PCIE_TRIGGER_TX, PCIE_TRIGGER_TX); ++ ++ ret = regmap_read_poll_timeout(pcie->cfg, H2X_STS, cfg_val, ++ (cfg_val & PCIE_TX_IDLE), 0, 50); ++ if (ret) { ++ dev_err(pcie->dev, ++ "[%X:%02X:%02X.%02X]CR tx timeout sts: 0x%08x\n", ++ pcie->domain, bus->number, PCI_SLOT(devfn), ++ PCI_FUNC(devfn), cfg_val); ++ goto out; ++ } ++ ++ regmap_write_bits(pcie->cfg, H2X_INT_STS, PCIE_TX_IDLE_CLEAR, 
PCIE_TX_IDLE_CLEAR); ++ ++ regmap_read(pcie->cfg, H2X_STS, &cfg_val); ++ switch (cfg_val & PCIE_STATUS_OF_TX) { ++ case PCIE_RC_L_TX_COMPLETE: ++ case PCIE_RC_H_TX_COMPLETE: ++ ret = readl_poll_timeout(pcie->reg + H2X_DEV_STS, isr, ++ (isr & PCIE_RC_RX_DONE_ISR), 0, 50); ++ if (ret) { ++ dev_err(pcie->dev, ++ "[%X:%02X:%02X.%02X]CR rx timeout sts: 0x%08x\n", ++ pcie->domain, bus->number, PCI_SLOT(devfn), ++ PCI_FUNC(devfn), isr); ++ rx_done_fail = 1; ++ *val = ~0; ++ } ++ if (!rx_done_fail) { ++ if (readl(pcie->reg + H2X_DEV_RX_DESC1) & BIT(13)) ++ *val = ~0; ++ else ++ *val = readl(pcie->reg + H2X_DEV_RX_DESC_DATA); ++ } ++ ++ writel(PCIE_UNLOCK_RX_BUFF | readl(pcie->reg + H2X_DEV_CTRL), ++ pcie->reg + H2X_DEV_CTRL); ++ break; ++ case PCIE_STATUS_OF_TX: ++ *val = ~0; ++ break; ++ default: ++ regmap_read(pcie->cfg, H2X_DEV_RX_DESC_DATA, &cfg_val); ++ *val = cfg_val; ++ break; ++ } ++ ++ switch (size) { ++ case 1: ++ *val = (*val >> ((where & 3) * 8)) & 0xff; ++ break; ++ case 2: ++ *val = (*val >> ((where & 2) * 8)) & 0xffff; ++ break; ++ } ++ ++ if (IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)) { ++ if (where == (0x80 + PCI_EXP_SLTSTA) && ++ bus->number == pcie->host_bus_num && ++ PCI_SLOT(devfn) == 0x8 && ++ PCI_FUNC(devfn) == 0x0 && ++ pcie->hotplug_event) ++ *val |= PCI_EXP_SLTSTA_ABP; ++ } ++ ++ ret = PCIBIOS_SUCCESSFUL; ++out: ++ writel(readl(pcie->reg + H2X_DEV_STS), pcie->reg + H2X_DEV_STS); ++ pcie->tx_tag++; ++ return ret; ++ } ++ ++ static int aspeed_ast2600_wr_conf(struct pci_bus *bus, unsigned int devfn, ++ int where, int size, u32 val) ++ { ++ u32 type = 0; ++ u32 shift = 8 * (where & 3); ++ u32 bdf_offset; ++ u8 byte_en = 0; ++ struct aspeed_pcie *pcie = bus->sysdata; ++ u32 isr, cfg_val; ++ int ret; ++ ++ if (IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)) { ++ if (where == (0x80 + PCI_EXP_SLTSTA) && ++ bus->number == pcie->host_bus_num && ++ PCI_SLOT(devfn) == 0x8 && ++ PCI_FUNC(devfn) == 0x0 && ++ pcie->hotplug_event && ++ (val & PCI_EXP_SLTSTA_ABP)) { ++ pcie->hotplug_event = 0; ++ return PCIBIOS_SUCCESSFUL; ++ } ++ } ++ ++ /* Unlock the RX buffer before triggering the next TX config */ ++ writel(PCIE_UNLOCK_RX_BUFF | readl(pcie->reg + H2X_DEV_CTRL), ++ pcie->reg + H2X_DEV_CTRL); ++ ++ switch (size) { ++ case 1: ++ byte_en = 1 << (where % 4); ++ val = (val & 0xff) << shift; ++ break; ++ case 2: ++ byte_en = 0x3 << (2 * ((where >> 1) % 2)); ++ val = (val & 0xffff) << shift; ++ break; ++ default: ++ byte_en = 0xf; ++ break; ++ } ++ ++ type = (bus->number > pcie->host_bus_num); ++ ++ bdf_offset = (bus->number << 24) | (PCI_SLOT(devfn) << 19) | ++ (PCI_FUNC(devfn) << 16) | (where & ~3); ++ pcie->tx_tag %= 0x7; ++ ++ regmap_write(pcie->cfg, H2X_TX_DESC0, 0x44000001 | (type << 24)); ++ regmap_write(pcie->cfg, H2X_TX_DESC1, ++ 0x00002000 | (pcie->tx_tag << 8) | byte_en); ++ regmap_write(pcie->cfg, H2X_TX_DESC2, bdf_offset); ++ regmap_write(pcie->cfg, H2X_TX_DESC3, 0x00000000); ++ regmap_write(pcie->cfg, H2X_TX_DESC_DATA, val); ++ ++ regmap_write_bits(pcie->cfg, H2X_STS, PCIE_TRIGGER_TX, PCIE_TRIGGER_TX); ++ ++ ret = regmap_read_poll_timeout(pcie->cfg, H2X_STS, cfg_val, ++ (cfg_val & PCIE_TX_IDLE), 0, 50); ++ if (ret) { ++ dev_err(pcie->dev, ++ "[%X:%02X:%02X.%02X]CT tx timeout sts: 0x%08x\n", ++ pcie->domain, bus->number, PCI_SLOT(devfn), ++ PCI_FUNC(devfn), cfg_val); ++ ret = PCIBIOS_SET_FAILED; ++ goto out; ++ } ++ ++ regmap_write_bits(pcie->cfg, H2X_INT_STS, PCIE_TX_IDLE_CLEAR, ++ PCIE_TX_IDLE_CLEAR); ++ ++ regmap_read(pcie->cfg, H2X_STS, &cfg_val); ++ switch (cfg_val & PCIE_STATUS_OF_TX) { 
++ case PCIE_RC_L_TX_COMPLETE: ++ case PCIE_RC_H_TX_COMPLETE: ++ ret = readl_poll_timeout(pcie->reg + H2X_DEV_STS, isr, ++ (isr & PCIE_RC_RX_DONE_ISR), 0, 50); ++ if (ret) { ++ dev_err(pcie->dev, ++ "[%X:%02X:%02X.%02X]CT rx timeout sts: 0x%08x\n", ++ pcie->domain, bus->number, PCI_SLOT(devfn), ++ PCI_FUNC(devfn), isr); ++ ret = PCIBIOS_SET_FAILED; ++ goto out; ++ } ++ break; ++ } ++ ret = PCIBIOS_SUCCESSFUL; ++out: ++ writel(readl(pcie->reg + H2X_DEV_STS), pcie->reg + H2X_DEV_STS); ++ pcie->tx_tag++; ++ return ret; ++ } ++ ++ static bool aspeed_ast2700_get_link(struct aspeed_pcie *pcie) ++ { ++ u32 reg; ++ bool link; ++ ++ if (pcie->domain == 2) { ++ regmap_read(pcie->pciephy, PEHR_MISC_344, &reg); ++ link = !!(reg & LINK_STATUS_GEN2); ++ } else { ++ regmap_read(pcie->pciephy, PEHR_MISC_358, &reg); ++ link = !!(reg & LINK_STATUS_GEN4); ++ } ++ ++ return link; ++ } ++ ++ static int aspeed_ast2700_rd_conf(struct pci_bus *bus, unsigned int devfn, ++ int where, int size, u32 *val) ++ { ++ struct aspeed_pcie *pcie = bus->sysdata; ++ u32 bdf_offset, status; ++ u8 type; ++ int ret; ++ ++ if ((bus->number == pcie->host_bus_num && devfn != 0)) ++ return PCIBIOS_DEVICE_NOT_FOUND; ++ ++ if (bus->number == pcie->host_bus_num) { ++ /* Internal access to bridge */ ++ writel(0xF << 16 | (where & ~3), pcie->reg + H2X_CFGI_TLP); ++ writel(CFGI_TLP_FIRE, pcie->reg + H2X_CFGI_CTRL); ++ *val = readl(pcie->reg + H2X_CFGI_RET_DATA); ++ } else { ++ if (!aspeed_ast2700_get_link(pcie)) ++ return PCIBIOS_DEVICE_NOT_FOUND; ++ ++ bdf_offset = ((bus->number) << 24) | (PCI_SLOT(devfn) << 19) | ++ (PCI_FUNC(devfn) << 16) | (where & ~3); ++ ++ pcie->tx_tag %= 0xF; ++ ++ type = (bus->number == (pcie->host_bus_num + 1)) ? ++ PCI_HEADER_TYPE_NORMAL : ++ PCI_HEADER_TYPE_BRIDGE; ++ ++ writel(CRG_READ_FMTTYPE(type) | CRG_PAYLOAD_SIZE, pcie->reg + H2X_CFGE_TLP_1ST); ++ writel(0x40100F | (pcie->tx_tag << 8), pcie->reg + H2X_CFGE_TLP_NEXT); ++ writel(bdf_offset, pcie->reg + H2X_CFGE_TLP_NEXT); ++ writel(CFGE_TX_IDLE | CFGE_RX_BUSY, pcie->reg + H2X_CFGE_INT_STS); ++ writel(CFGE_TLP_FIRE, pcie->reg + H2X_CFGE_CTRL); ++ ++ ret = readl_poll_timeout(pcie->reg + H2X_CFGE_INT_STS, status, ++ (status & CFGE_TX_IDLE), 0, 50); ++ if (ret) { ++ dev_err(pcie->dev, ++ "[%X:%02X:%02X.%02X]CR tx timeout sts: 0x%08x\n", ++ pcie->domain, bus->number, PCI_SLOT(devfn), ++ PCI_FUNC(devfn), status); ++ goto out; ++ } ++ ++ ret = readl_poll_timeout(pcie->reg + H2X_CFGE_INT_STS, status, ++ (status & CFGE_RX_BUSY), 0, 50000); ++ if (ret) { ++ dev_err(pcie->dev, ++ "[%X:%02X:%02X.%02X]CR rx timeout sts: 0x%08x\n", ++ pcie->domain, bus->number, PCI_SLOT(devfn), ++ PCI_FUNC(devfn), status); ++ goto out; ++ } ++ *val = readl(pcie->reg + H2X_CFGE_RET_DATA); ++ } ++ ++ switch (size) { ++ case 1: ++ *val = (*val >> ((where & 3) * 8)) & 0xff; ++ break; ++ case 2: ++ *val = (*val >> ((where & 2) * 8)) & 0xffff; ++ break; ++ } ++ ++ writel(status, pcie->reg + H2X_CFGE_INT_STS); ++ pcie->tx_tag++; ++ return PCIBIOS_SUCCESSFUL; ++out: ++ *val = ~0; ++ writel(status, pcie->reg + H2X_CFGE_INT_STS); ++ pcie->tx_tag++; ++ return PCIBIOS_SET_FAILED; ++ } ++ ++ static int aspeed_ast2700_wr_conf(struct pci_bus *bus, unsigned int devfn, ++ int where, int size, u32 val) ++ { ++ struct aspeed_pcie *pcie = bus->sysdata; ++ u32 shift = 8 * (where & 3); ++ u8 byte_en; ++ u32 bdf_offset, status, type; ++ int ret; ++ ++ if ((bus->number == pcie->host_bus_num && devfn != 0)) ++ return PCIBIOS_DEVICE_NOT_FOUND; ++ ++ switch (size) { ++ case 1: ++ byte_en = 1 << (where % 4); ++ val = (val & 0xff) 
<< shift; ++ break; ++ case 2: ++ byte_en = 0x3 << (2 * ((where >> 1) % 2)); ++ val = (val & 0xffff) << shift; ++ break; ++ default: ++ byte_en = 0xf; ++ break; ++ } ++ ++ if (bus->number == pcie->host_bus_num) { ++ /* Internal access to bridge */ ++ writel(0x100000 | byte_en << 16 | (where & ~3), pcie->reg + H2X_CFGI_TLP); ++ writel(val, pcie->reg + H2X_CFGI_WR_DATA); ++ writel(CFGI_TLP_FIRE, pcie->reg + H2X_CFGI_CTRL); ++ } else { ++ if (!aspeed_ast2700_get_link(pcie)) ++ return PCIBIOS_SET_FAILED; ++ ++ bdf_offset = (bus->number << 24) | (PCI_SLOT(devfn) << 19) | ++ (PCI_FUNC(devfn) << 16) | (where & ~3); ++ pcie->tx_tag %= 0xF; ++ ++ type = (bus->number == (pcie->host_bus_num + 1)) ? ++ PCI_HEADER_TYPE_NORMAL : ++ PCI_HEADER_TYPE_BRIDGE; ++ ++ writel(CRG_WRITE_FMTTYPE(type) | CRG_PAYLOAD_SIZE, pcie->reg + H2X_CFGE_TLP_1ST); ++ writel(0x401000 | (pcie->tx_tag << 8) | byte_en, pcie->reg + H2X_CFGE_TLP_NEXT); ++ writel(bdf_offset, pcie->reg + H2X_CFGE_TLP_NEXT); ++ writel(val, pcie->reg + H2X_CFGE_TLP_NEXT); ++ writel(CFGE_TX_IDLE | CFGE_RX_BUSY, pcie->reg + H2X_CFGE_INT_STS); ++ writel(CFGE_TLP_FIRE, pcie->reg + H2X_CFGE_CTRL); ++ ++ ret = readl_poll_timeout(pcie->reg + H2X_CFGE_INT_STS, status, ++ (status & CFGE_TX_IDLE), 0, 50); ++ if (ret) { ++ dev_err(pcie->dev, ++ "[%X:%02X:%02X.%02X]CT tx timeout sts: 0x%08x\n", ++ pcie->domain, bus->number, PCI_SLOT(devfn), ++ PCI_FUNC(devfn), status); ++ ret = PCIBIOS_SET_FAILED; ++ goto out; ++ } ++ ++ ret = readl_poll_timeout(pcie->reg + H2X_CFGE_INT_STS, status, ++ (status & CFGE_RX_BUSY), 0, 50000); ++ if (ret) { ++ dev_err(pcie->dev, ++ "[%X:%02X:%02X.%02X]CT rx timeout sts: 0x%08x\n", ++ pcie->domain, bus->number, PCI_SLOT(devfn), ++ PCI_FUNC(devfn), status); ++ ret = PCIBIOS_SET_FAILED; ++ goto out; ++ } ++ ++ (void)readl(pcie->reg + H2X_CFGE_RET_DATA); ++ } ++ ret = PCIBIOS_SUCCESSFUL; ++out: ++ writel(status, pcie->reg + H2X_CFGE_INT_STS); ++ pcie->tx_tag++; ++ return ret; ++} ++ ++static struct pci_ops aspeed_ast2600_pcie_ops = { ++ .read = aspeed_ast2600_rd_conf, ++ .write = aspeed_ast2600_wr_conf, ++}; ++ ++static struct pci_ops aspeed_ast2700_pcie_ops = { ++ .read = aspeed_ast2700_rd_conf, ++ .write = aspeed_ast2700_wr_conf, ++}; ++ ++#ifdef CONFIG_PCI_MSI ++static void aspeed_msi_compose_msi_msg(struct irq_data *data, ++ struct msi_msg *msg) ++{ ++ struct aspeed_pcie *pcie = irq_data_get_irq_chip_data(data); ++ ++ msg->address_hi = 0; ++ msg->address_lo = pcie->platform->msi_address; ++ msg->data = data->hwirq; ++} ++ ++static int aspeed_msi_set_affinity(struct irq_data *irq_data, ++ const struct cpumask *mask, bool force) ++{ ++ return -EINVAL; ++} ++ ++static struct irq_chip aspeed_msi_bottom_irq_chip = { ++ .name = "ASPEED MSI", ++ .irq_compose_msi_msg = aspeed_msi_compose_msi_msg, ++ .irq_set_affinity = aspeed_msi_set_affinity, ++}; ++ ++static int aspeed_irq_msi_domain_alloc(struct irq_domain *domain, ++ unsigned int virq, unsigned int nr_irqs, ++ void *args) ++{ ++ struct aspeed_pcie *pcie = domain->host_data; ++ int bit; ++ int i; ++ ++ mutex_lock(&pcie->lock); ++ ++ bit = bitmap_find_free_region(pcie->msi_irq_in_use, MAX_MSI_HOST_IRQS, ++ get_count_order(nr_irqs)); ++ ++ mutex_unlock(&pcie->lock); ++ ++ if (bit < 0) ++ return -ENOSPC; ++ ++ for (i = 0; i < nr_irqs; i++) { ++ irq_domain_set_info(domain, virq + i, bit + i, ++ &aspeed_msi_bottom_irq_chip, ++ domain->host_data, handle_simple_irq, NULL, ++ NULL); ++ } ++ ++ return 0; ++} ++ ++static void aspeed_irq_msi_domain_free(struct irq_domain *domain, ++ unsigned int virq, 
unsigned int nr_irqs) ++{ ++ struct irq_data *data = irq_domain_get_irq_data(domain, virq); ++ struct aspeed_pcie *pcie = irq_data_get_irq_chip_data(data); ++ ++ mutex_lock(&pcie->lock); ++ ++ bitmap_release_region(pcie->msi_irq_in_use, data->hwirq, ++ get_count_order(nr_irqs)); ++ ++ mutex_unlock(&pcie->lock); ++} ++ ++static const struct irq_domain_ops aspeed_msi_domain_ops = { ++ .alloc = aspeed_irq_msi_domain_alloc, ++ .free = aspeed_irq_msi_domain_free, ++}; ++ ++static struct irq_chip aspeed_msi_irq_chip = { ++ .name = "PCIe MSI", ++ .irq_enable = pci_msi_unmask_irq, ++ .irq_disable = pci_msi_mask_irq, ++ .irq_mask = pci_msi_mask_irq, ++ .irq_unmask = pci_msi_unmask_irq, ++}; ++ ++static struct msi_domain_info aspeed_msi_domain_info = { ++ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | ++ MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX), ++ .chip = &aspeed_msi_irq_chip, ++}; ++#endif ++ ++static void aspeed_pcie_irq_domain_free(struct aspeed_pcie *pcie) ++{ ++ if (pcie->irq_domain) { ++ irq_domain_remove(pcie->irq_domain); ++ pcie->irq_domain = NULL; ++ } ++#ifdef CONFIG_PCI_MSI ++ if (pcie->msi_domain) { ++ irq_domain_remove(pcie->msi_domain); ++ pcie->msi_domain = NULL; ++ } ++ ++ if (pcie->dev_domain) { ++ irq_domain_remove(pcie->dev_domain); ++ pcie->dev_domain = NULL; ++ } ++#endif ++} ++ ++static int aspeed_pcie_init_irq_domain(struct aspeed_pcie *pcie) ++{ ++ struct device *dev = pcie->dev; ++ struct device_node *node = dev->of_node; ++ struct device_node *pcie_intc_node; ++ int ret; ++ ++ pcie_intc_node = of_get_next_child(node, NULL); ++ if (!pcie_intc_node) ++ return dev_err_probe(dev, -ENODEV, "No PCIe Intc node found\n"); ++ ++ pcie->irq_domain = ++ irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, &aspeed_intx_domain_ops, pcie); ++ of_node_put(pcie_intc_node); ++ if (!pcie->irq_domain) { ++ ret = dev_err_probe(dev, -ENOMEM, "failed to get an INTx IRQ domain\n"); ++ goto err; ++ } ++ ++ writel(0, pcie->reg + pcie->platform->reg_intx_en); ++ writel(~0, pcie->reg + pcie->platform->reg_intx_sts); ++ ++#ifdef CONFIG_PCI_MSI ++ pcie->dev_domain = ++ irq_domain_add_linear(NULL, MAX_MSI_HOST_IRQS, &aspeed_msi_domain_ops, pcie); ++ if (!pcie->dev_domain) { ++ ret = dev_err_probe(pcie->dev, -ENOMEM, "failed to create IRQ domain\n"); ++ goto err; ++ } ++ ++ pcie->msi_domain = pci_msi_create_irq_domain(dev_fwnode(pcie->dev), &aspeed_msi_domain_info, ++ pcie->dev_domain); ++ if (!pcie->msi_domain) { ++ ret = dev_err_probe(pcie->dev, -ENOMEM, "failed to create MSI domain\n"); ++ goto err; ++ } ++ ++ writel(~0, pcie->reg + pcie->platform->reg_msi_en); ++ writel(~0, pcie->reg + pcie->platform->reg_msi_en + 0x04); ++ writel(~0, pcie->reg + pcie->platform->reg_msi_sts); ++ writel(~0, pcie->reg + pcie->platform->reg_msi_sts + 0x04); ++#endif ++ return 0; ++err: ++ aspeed_pcie_irq_domain_free(pcie); ++ return ret; ++} ++ ++static void aspeed_pcie_port_init(struct aspeed_pcie *pcie) ++{ ++ u32 link_sts = 0; ++ ++ regmap_write(pcie->pciephy, PEHR_LOCK, PCIE_UNLOCK); ++ ++ if (IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)) { ++ regmap_write(pcie->pciephy, PEHR_GLOBAL, ++ RC_SYNC_RESET_DISABLE | ROOT_COMPLEX_ID(0x3) | PCIE_RC_SLOT_ENABLE); ++ regmap_write(pcie->pciephy, PEHR_MISC_10, 0xd7040022 | DATALINK_REPORT_CAPABLE); ++ regmap_write(pcie->pciephy, PEHR_MISC_14, ++ HOTPLUG_CAPABLE_ENABLE | HOTPLUG_SURPRISE_ENABLE | ++ ATTENTION_BUTTON_ENABLE); ++ } else { ++ regmap_write(pcie->pciephy, PEHR_GLOBAL, ROOT_COMPLEX_ID(0x3)); ++ } ++ ++ if (pcie->perst_rc_out) { ++ mdelay(100); ++ 
gpiod_set_value(pcie->perst_rc_out, 1); ++ } ++ ++ reset_control_deassert(pcie->perst); ++ mdelay(500); ++ ++ writel(PCIE_RX_DMA_EN | PCIE_RX_LINEAR | PCIE_RX_MSI_SEL | PCIE_RX_MSI_EN | ++ PCIE_Wait_RX_TLP_CLR | PCIE_RC_RX_ENABLE | PCIE_RC_ENABLE, ++ pcie->reg + H2X_DEV_CTRL); ++ ++ writel(0x28, pcie->reg + H2X_DEV_TX_TAG); ++ ++ regmap_read(pcie->pciephy, PEHR_LINK, &link_sts); ++ if (link_sts & PCIE_LINK_STS) ++ dev_info(pcie->dev, "PCIE- Link up\n"); ++ else ++ dev_info(pcie->dev, "PCIE- Link down\n"); ++} ++ ++static ssize_t hotplug_store(struct device *dev, struct device_attribute *attr, ++ const char *buf, size_t len) ++{ ++ struct aspeed_pcie *pcie = dev_get_drvdata(dev); ++ ++ pcie->hotplug_event = 1; ++ ++ if (of_device_is_compatible(pcie->dev->of_node, "aspeed,ast2700-pcie")) { ++ regmap_write_bits(pcie->pciephy, PEHR_MISC_1B8, SW_ATT_BTN, SW_ATT_BTN); ++ regmap_clear_bits(pcie->pciephy, PEHR_MISC_1B8, SW_ATT_BTN); ++ } ++ ++ return len; ++} ++ ++static DEVICE_ATTR_WO(hotplug); ++ ++static void aspeed_pcie_reset_work(struct work_struct *work) ++{ ++ struct aspeed_pcie *pcie = ++ container_of(work, typeof(*pcie), rst_dwork.work); ++ struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); ++ struct pci_bus *parent = host->bus; ++ struct pci_dev *dev, *temp; ++ u32 link_sts = 0; ++ u16 command; ++ ++ pci_lock_rescan_remove(); ++ ++ list_for_each_entry_safe_reverse(dev, temp, &parent->devices, ++ bus_list) { ++ pci_dev_get(dev); ++ pci_stop_and_remove_bus_device(dev); ++ /* ++ * Ensure that no new Requests will be generated from ++ * the device. ++ */ ++ pci_read_config_word(dev, PCI_COMMAND, &command); ++ command &= ~(PCI_COMMAND_MASTER | PCI_COMMAND_SERR); ++ command |= PCI_COMMAND_INTX_DISABLE; ++ pci_write_config_word(dev, PCI_COMMAND, command); ++ pci_dev_put(dev); ++ } ++ ++ /* ++ * With perst_rc_out GPIO, the perst will only affect our PCIe controller, so it only ++ * needs to stay low for 1ms. ++ * Without perst_rc_out GPIO, the perst will affect external devices, so it needs to ++ * follow the spec and stay low for at least 100ms. 
++ */ ++ reset_control_assert(pcie->perst); ++ if (pcie->perst_rc_out) { ++ gpiod_set_value(pcie->perst_rc_out, 0); ++ mdelay(1); ++ } else { ++ mdelay(100); ++ } ++ reset_control_deassert(pcie->perst); ++ if (pcie->perst_rc_out) { ++ mdelay(100); ++ gpiod_set_value(pcie->perst_rc_out, 1); ++ } ++ mdelay(10); ++ ++ regmap_read(pcie->pciephy, PEHR_LINK, &link_sts); ++ if (link_sts & PCIE_LINK_STS) ++ dev_info(pcie->dev, "PCIE- Link up\n"); ++ else ++ dev_info(pcie->dev, "PCIE- Link down\n"); ++ ++ pci_rescan_bus(host->bus); ++ pci_unlock_rescan_remove(); ++} ++ ++static irqreturn_t pcie_rst_irq_handler(int irq, void *dev_id) ++{ ++ struct aspeed_pcie *pcie = dev_id; ++ ++ schedule_delayed_work(&pcie->rst_dwork, 0); ++ ++ return IRQ_HANDLED; ++} ++ ++static int aspeed_ast2600_setup(struct platform_device *pdev) ++{ ++ struct aspeed_pcie *pcie = platform_get_drvdata(pdev); ++ struct device *dev = pcie->dev; ++ int ret; ++ ++ if (pcie->host_bus_num != 0x80) { ++ dev_err(dev, "AST2600 only supports to start bus number 0x80\n"); ++ return -EINVAL; ++ } ++ ++ pcie->ahbc = syscon_regmap_lookup_by_phandle(dev->of_node, "aspeed,ahbc"); ++ if (IS_ERR(pcie->ahbc)) ++ return dev_err_probe(dev, PTR_ERR(pcie->ahbc), "failed to map ahbc base\n"); ++ ++ reset_control_assert(pcie->h2xrst); ++ mdelay(5); ++ reset_control_deassert(pcie->h2xrst); ++ ++ regmap_write(pcie->ahbc, AHBC_KEY, AHBC_UNLOCK); ++ regmap_update_bits(pcie->ahbc, AHBC_ADDR_MAPPING, PCIE_RC_MEMORY_EN, PCIE_RC_MEMORY_EN); ++ regmap_write(pcie->ahbc, AHBC_KEY, 0x1); ++ ++ regmap_write(pcie->cfg, H2X_AHB_ADDR_CONFIG0, 0xe0006000); ++ regmap_write(pcie->cfg, H2X_AHB_ADDR_CONFIG1, 0); ++ regmap_write(pcie->cfg, H2X_AHB_ADDR_CONFIG2, ~0); ++ ++ regmap_write(pcie->cfg, H2X_CTRL, H2X_BRIDGE_EN); ++ ++ aspeed_pcie_port_init(pcie); ++ ++ pcie->host->ops = &aspeed_ast2600_pcie_ops; ++ ++ pcie->perst_ep_in = devm_gpiod_get_optional(pcie->dev, "perst-ep-in", GPIOD_IN); ++ if (pcie->perst_ep_in) { ++ gpiod_set_debounce(pcie->perst_ep_in, 100); ++ irq_set_irq_type(gpiod_to_irq(pcie->perst_ep_in), IRQ_TYPE_EDGE_BOTH); ++ ret = devm_request_irq(pcie->dev, gpiod_to_irq(pcie->perst_ep_in), ++ pcie_rst_irq_handler, IRQF_SHARED, "PERST monitor", pcie); ++ if (ret) ++ return dev_err_probe(pcie->dev, ret, "Failed to request gpio irq\n"); ++ INIT_DELAYED_WORK(&pcie->rst_dwork, aspeed_pcie_reset_work); ++ } ++ pcie->perst_owner = ++ devm_gpiod_get_optional(pcie->dev, "perst-owner", GPIOD_OUT_HIGH); ++ ++ return 0; ++} ++ ++static int aspeed_ast2700_setup(struct platform_device *pdev) ++{ ++ struct aspeed_pcie *pcie = platform_get_drvdata(pdev); ++ struct device *dev = pcie->dev; ++ u32 cfg_val; ++ ++ reset_control_assert(pcie->perst); ++ ++ regmap_write(pcie->pciephy, PEHR_MISC_70, 0xa00c0); ++ regmap_write(pcie->pciephy, PEHR_MISC_78, 0x80030); ++ regmap_write(pcie->pciephy, PEHR_MISC_58, LOCAL_SCALE_SUP); ++ ++ regmap_update_bits(pcie->cfg, SCU_60, ++ RC_E2M_PATH_EN | RC_H2XS_PATH_EN | RC_H2XD_PATH_EN | RC_H2XX_PATH_EN | ++ RC_UPSTREAM_MEM_EN, ++ RC_E2M_PATH_EN | RC_H2XS_PATH_EN | RC_H2XD_PATH_EN | RC_H2XX_PATH_EN | ++ RC_UPSTREAM_MEM_EN); ++ regmap_write(pcie->cfg, SCU_64, 0xff00ff00); ++ regmap_write(pcie->cfg, SCU_70, 0); ++ regmap_write(pcie->cfg, SCU_78, (pcie->domain == 1) ? 
BIT(31) : 0); ++ ++ reset_control_assert(pcie->h2xrst); ++ mdelay(10); ++ reset_control_deassert(pcie->h2xrst); ++ ++ regmap_write(pcie->pciephy, PEHR_MISC_5C, 0x40000000); ++ regmap_read(pcie->pciephy, PEHR_MISC_60, &cfg_val); ++ regmap_write(pcie->pciephy, PEHR_MISC_60, ++ (cfg_val & ~PORT_TPYE) | FIELD_PREP(PORT_TPYE, PORT_TYPE_ROOT)); ++ ++ writel(0, pcie->reg + H2X_CTRL); ++ writel(H2X_BRIDGE_EN | H2X_BRIDGE_DIRECT_EN, pcie->reg + H2X_CTRL); ++ ++ /* The BAR mapping: ++ * CPU Node0(domain 0): 0x60000000 ++ * CPU Node1(domain 1): 0x80000000 ++ * IO (domain 2): 0xa0000000 ++ */ ++ writel(0x60000000 + (0x20000000 * pcie->domain), pcie->reg + H2X_REMAP_DIRECT_ADDR); ++ ++ /* Prepare for 64-bit BAR pref */ ++ writel(0x3, pcie->reg + H2X_REMAP_PREF_ADDR); ++ ++ reset_control_deassert(pcie->perst); ++ if (pcie->perst_rc_out) ++ gpiod_set_value(pcie->perst_rc_out, 1); ++ mdelay(1000); ++ ++ pcie->host->ops = &aspeed_ast2700_pcie_ops; ++ ++ if (IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)) { ++ regmap_write_bits(pcie->pciephy, PEHR_MISC_44, ENABLE_SLOT_CAP, ++ ENABLE_SLOT_CAP); ++ regmap_write(pcie->pciephy, PEHR_MISC_3C, ++ HOTPLUG_CAPABLE_ENABLE | HOTPLUG_SURPRISE_ENABLE | ++ ATTENTION_BUTTON_ENABLE); ++ regmap_write_bits(pcie->pciephy, PEHR_MISC_38, ++ DATALINK_REPORT_CAP, DATALINK_REPORT_CAP); ++ } ++ ++ if (!aspeed_ast2700_get_link(pcie)) ++ dev_info(dev, "PCIe Link DOWN"); ++ else ++ dev_info(dev, "PCIe Link UP"); ++ ++ return 0; ++} ++ ++static int aspeed_pcie_probe(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ struct pci_host_bridge *host; ++ struct aspeed_pcie *pcie; ++ struct device_node *node = dev->of_node; ++ struct resource bus_range; ++ const void *md = of_device_get_match_data(dev); ++ int irq, ret; ++ ++ if (!md) ++ return -ENODEV; ++ ++ host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); ++ if (!host) ++ return -ENOMEM; ++ ++ pcie = pci_host_bridge_priv(host); ++ pcie->dev = dev; ++ pcie->tx_tag = 0; ++ platform_set_drvdata(pdev, pcie); ++ ++ pcie->platform = md; ++ pcie->host = host; ++ ++ if (of_pci_parse_bus_range(node, &bus_range)) { ++ dev_warn(dev, "Failed to parse bus range\n"); ++ pcie->host_bus_num = 0; ++ } ++ pcie->host_bus_num = bus_range.start; ++ ++ pcie->reg = devm_platform_ioremap_resource(pdev, 0); ++ ++ pcie->domain = of_get_pci_domain_nr(node); ++ ++ pcie->cfg = syscon_regmap_lookup_by_phandle(dev->of_node, "aspeed,pciecfg"); ++ if (IS_ERR(pcie->cfg)) ++ return dev_err_probe(dev, PTR_ERR(pcie->cfg), "Failed to map pciecfg base\n"); ++ ++ pcie->pciephy = syscon_regmap_lookup_by_phandle(node, "aspeed,pciephy"); ++ if (IS_ERR(pcie->pciephy)) ++ return dev_err_probe(dev, PTR_ERR(pcie->pciephy), "Failed to map pciephy base\n"); ++ ++ pcie->h2xrst = devm_reset_control_get_exclusive(dev, "h2x"); ++ if (IS_ERR(pcie->h2xrst)) ++ return dev_err_probe(dev, PTR_ERR(pcie->h2xrst), "Failed to get h2x reset\n"); ++ ++ pcie->perst = devm_reset_control_get_exclusive(dev, "perst"); ++ if (IS_ERR(pcie->perst)) ++ return dev_err_probe(dev, PTR_ERR(pcie->perst), "Failed to get perst reset\n"); ++ ++ pcie->perst_rc_out = devm_gpiod_get_optional(dev, "perst-rc-out", ++ GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_NONEXCLUSIVE); ++ ++ ret = devm_mutex_init(dev, &pcie->lock); ++ if (ret) ++ return dev_err_probe(dev, ret, "failed to init mutex\n"); ++ ++ ret = pcie->platform->setup(pdev); ++ if (ret) ++ return dev_err_probe(dev, ret, "Failed to setup PCIe RC\n"); ++ ++ if (IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)) { ++ ret = sysfs_create_file(&pdev->dev.kobj, 
&dev_attr_hotplug.attr); ++ if (ret) ++ return dev_err_probe(&pdev->dev, ret, "unable to create sysfs interface\n"); ++ } ++ ++ host->sysdata = pcie; ++ ++ ret = aspeed_pcie_init_irq_domain(pcie); ++ if (ret) ++ return dev_err_probe(dev, ret, "Failed to initialize IntX/MSI domain\n"); ++ ++ irq = platform_get_irq(pdev, 0); ++ if (irq < 0) ++ return dev_err_probe(dev, irq, "Failed to get IRQ\n"); ++ ++ ret = devm_request_irq(dev, irq, aspeed_pcie_intr_handler, IRQF_SHARED, ++ dev_name(dev), pcie); ++ if (ret) ++ return dev_err_probe(dev, ret, "Failed to request IRQ\n"); ++ ++ pcie->clock = clk_get(dev, NULL); ++ if (IS_ERR(pcie->clock)) ++ return dev_err_probe(dev, PTR_ERR(pcie->clock), "Failed to request clock\n"); ++ ++ ret = clk_prepare_enable(pcie->clock); ++ if (ret) { ++ clk_put(pcie->clock); ++ return dev_err_probe(dev, ret, "Failed to enable the clock\n"); ++ } ++ ++ return pci_host_probe(host); ++} ++ ++static void aspeed_pcie_remove(struct platform_device *pdev) ++{ ++ struct aspeed_pcie *pcie = platform_get_drvdata(pdev); ++ ++ if (pcie->clock) { ++ clk_disable_unprepare(pcie->clock); ++ clk_put(pcie->clock); ++ } ++ ++ pci_stop_root_bus(pcie->host->bus); ++ pci_remove_root_bus(pcie->host->bus); ++ aspeed_pcie_irq_domain_free(pcie); ++} ++ ++static struct aspeed_pcie_rc_platform pcie_rc_ast2600 = { ++ .setup = aspeed_ast2600_setup, ++ .reg_intx_en = 0x04, ++ .reg_intx_sts = 0x08, ++ .reg_msi_en = 0x20, ++ .reg_msi_sts = 0x28, ++ .msi_address = 0x1e77005c, ++}; ++ ++static struct aspeed_pcie_rc_platform pcie_rc_ast2700 = { ++ .setup = aspeed_ast2700_setup, ++ .reg_intx_en = 0x40, ++ .reg_intx_sts = 0x48, ++ .reg_msi_en = 0x50, ++ .reg_msi_sts = 0x58, ++ .msi_address = 0x000000f0, ++}; ++ ++static const struct of_device_id aspeed_pcie_of_match[] = { ++ { .compatible = "aspeed,ast2600-pcie", .data = &pcie_rc_ast2600 }, ++ { .compatible = "aspeed,ast2700-pcie", .data = &pcie_rc_ast2700 }, ++ {} ++}; ++ ++static struct platform_driver aspeed_pcie_driver = { ++ .driver = { ++ .name = "aspeed-pcie", ++ .suppress_bind_attrs = true, ++ .of_match_table = aspeed_pcie_of_match, ++ }, ++ .probe = aspeed_pcie_probe, ++ .remove_new = aspeed_pcie_remove, ++}; ++ ++module_platform_driver(aspeed_pcie_driver); +diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig +--- a/drivers/phy/Kconfig 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/phy/Kconfig 2025-12-23 10:16:09.358229909 +0000 +@@ -84,6 +84,7 @@ + + source "drivers/phy/allwinner/Kconfig" + source "drivers/phy/amlogic/Kconfig" ++source "drivers/phy/aspeed/Kconfig" + source "drivers/phy/broadcom/Kconfig" + source "drivers/phy/cadence/Kconfig" + source "drivers/phy/freescale/Kconfig" +diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile +--- a/drivers/phy/Makefile 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/phy/Makefile 2025-12-23 10:16:13.519160131 +0000 +@@ -13,6 +13,7 @@ + obj-$(CONFIG_PHY_AIROHA_PCIE) += phy-airoha-pcie.o + obj-y += allwinner/ \ + amlogic/ \ ++ aspeed/ \ + broadcom/ \ + cadence/ \ + freescale/ \ +diff --git a/drivers/phy/aspeed/Kconfig b/drivers/phy/aspeed/Kconfig +--- a/drivers/phy/aspeed/Kconfig 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/phy/aspeed/Kconfig 2025-12-23 10:16:19.139065938 +0000 +@@ -0,0 +1,23 @@ ++# SPDX-License-Identifier: GPL-2.0-only ++ ++# ++# PHY drivers for ASPEED ++# ++ ++config PHY_ASPEED_SGMII ++ tristate "ASPEED SGMII PHY driver" ++ select REGMAP ++ select MFD_SYSCON ++ select GENERIC_PHY ++ depends on ARCH_ASPEED ++ default n ++ help ++ Enable driver support for Aspeed 
AST2700 PHY SGMII. ++ ++config PHY_ASPEED_USB3 ++ tristate "ASPEED USB3 PHY driver" ++ select GENERIC_PHY ++ depends on ARCH_ASPEED ++ default n ++ help ++ Enable driver support for Aspeed AST2700 PHY USB3. +diff --git a/drivers/phy/aspeed/Makefile b/drivers/phy/aspeed/Makefile +--- a/drivers/phy/aspeed/Makefile 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/phy/aspeed/Makefile 2025-12-23 10:16:19.139065938 +0000 +@@ -0,0 +1,4 @@ ++# SPDX-License-Identifier: GPL-2.0 ++ ++obj-$(CONFIG_PHY_ASPEED_SGMII) += aspeed-sgmii.o ++obj-$(CONFIG_PHY_ASPEED_USB3) += aspeed-usb-phy3.o +\ No newline at end of file +diff --git a/drivers/phy/aspeed/aspeed-sgmii.c b/drivers/phy/aspeed/aspeed-sgmii.c +--- a/drivers/phy/aspeed/aspeed-sgmii.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/phy/aspeed/aspeed-sgmii.c 2025-12-23 10:16:21.033034195 +0000 +@@ -0,0 +1,218 @@ ++// SPDX-License-Identifier: GPL-2.0+ ++/* ++ * Copyright 2023 Aspeed Technology Inc. ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define SGMII_CFG 0x00 ++#define SGMII_LINK_TIMER 0x08 ++#define SGMII_NWAY_ACK 0x0c ++#define SGMII_PHY_CFG1 0x18 ++#define SGMII_PHY_PIPE_CTL 0x20 ++#define SGMII_FIFO_DELAY_THREHOLD 0x28 ++#define SGMII_MODE 0x30 ++ ++#define SGMII_CFG_FIFO_MODE BIT(0) ++#define SGMII_CFG_SPEED_10M 0 ++#define SGMII_CFG_SPEED_100M BIT(4) ++#define SGMII_CFG_SPEED_1G BIT(5) ++#define SGMII_CFG_PWR_DOWN BIT(11) ++#define SGMII_CFG_AN_ENABLE BIT(12) ++#define SGMII_CFG_SW_RESET BIT(15) ++#define SGMII_PCTL_TX_NO_DEEMPH BIT(7) ++#define SGMII_MODE_ENABLE BIT(0) ++#define SGMII_MODE_USE_LOCAL_CONFIG BIT(2) ++ ++#define PLDA_CLK 0x268 ++ ++#define PLDA_CLK_SEL_INTERNAL_25M BIT(8) ++#define PLDA_CLK_FREQ_MULTI GENMASK(7, 0) ++ ++struct aspeed_sgmii { ++ struct device *dev; ++ void __iomem *regs; ++ struct regmap *plda_regmap; ++}; ++ ++static void aspeed_sgmii_set_nway(struct phy *phy) ++{ ++ struct aspeed_sgmii *sgmii = phy_get_drvdata(phy); ++ u32 reg; ++ ++ /* ++ * The PLDA frequency multiplication is X xor 0x19. ++ * (X xor 0x19) * clock source = data rate. ++ * SGMII data rate is 1.25G, so (0x2b xor 0x19) * 25MHz is equal 1.25G. 
++ */ ++ reg = PLDA_CLK_SEL_INTERNAL_25M | FIELD_PREP(PLDA_CLK_FREQ_MULTI, 0x2b); ++ regmap_write(sgmii->plda_regmap, PLDA_CLK, reg); ++ ++ writel(0, sgmii->regs + SGMII_MODE); ++ ++ writel(0, sgmii->regs + SGMII_CFG); ++ reg = SGMII_CFG_SW_RESET | SGMII_CFG_PWR_DOWN; ++ writel(reg, sgmii->regs + SGMII_CFG); ++ ++ reg = SGMII_CFG_AN_ENABLE; ++ writel(reg, sgmii->regs + SGMII_CFG); ++ ++ writel(0x0c, sgmii->regs + SGMII_FIFO_DELAY_THREHOLD); ++ ++ writel(SGMII_PCTL_TX_NO_DEEMPH, sgmii->regs + SGMII_PHY_PIPE_CTL); ++ ++ /* Set link timer for Nway state change */ ++ writel(0x100, sgmii->regs + SGMII_LINK_TIMER); ++ ++ /* Bit 0 always sets to 1 in ACK message */ ++ writel(0x1, sgmii->regs + SGMII_NWAY_ACK); ++ ++ reg = SGMII_MODE_ENABLE; ++ writel(reg, sgmii->regs + SGMII_MODE); ++} ++ ++static int aspeed_sgmii_phy_init(struct phy *phy) ++{ ++ aspeed_sgmii_set_nway(phy); ++ ++ return 0; ++} ++ ++static int aspeed_sgmii_phy_exit(struct phy *phy) ++{ ++ struct aspeed_sgmii *sgmii = phy_get_drvdata(phy); ++ ++ /* Disable SGMII controller */ ++ writel(0, sgmii->regs + SGMII_MODE); ++ ++ return 0; ++} ++ ++static int aspeed_sgmii_phy_set_speed(struct phy *phy, int speed) ++{ ++ struct aspeed_sgmii *sgmii = phy_get_drvdata(phy); ++ u32 reg; ++ ++ reg = PLDA_CLK_SEL_INTERNAL_25M | FIELD_PREP(PLDA_CLK_FREQ_MULTI, 0x2b); ++ regmap_write(sgmii->plda_regmap, PLDA_CLK, reg); ++ ++ switch (speed) { ++ case SPEED_10: ++ reg = SGMII_CFG_SPEED_10M; ++ break; ++ case SPEED_100: ++ reg = SGMII_CFG_SPEED_100M; ++ break; ++ case SPEED_1000: ++ reg = SGMII_CFG_SPEED_1G; ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ writel(0, sgmii->regs + SGMII_MODE); ++ ++ writel((reg >> 2), sgmii->regs + SGMII_PHY_CFG1); ++ ++ writel(0, sgmii->regs + SGMII_CFG); ++ writel(SGMII_CFG_SW_RESET | SGMII_CFG_PWR_DOWN, sgmii->regs + SGMII_CFG); ++ writel(reg, sgmii->regs + SGMII_CFG); ++ ++ writel(0x0c, sgmii->regs + SGMII_FIFO_DELAY_THREHOLD); ++ writel(SGMII_PCTL_TX_NO_DEEMPH, sgmii->regs + SGMII_PHY_PIPE_CTL); ++ ++ /* Set link timer for Nway state change */ ++ writel(0x100, sgmii->regs + SGMII_LINK_TIMER); ++ ++ /* Bit 0 always sets to 1 in ACK message */ ++ writel(0x1, sgmii->regs + SGMII_NWAY_ACK); ++ ++ writel(SGMII_MODE_ENABLE | SGMII_MODE_USE_LOCAL_CONFIG, sgmii->regs + SGMII_MODE); ++ ++ return 0; ++} ++ ++static const struct phy_ops aspeed_sgmii_phyops = { ++ .init = aspeed_sgmii_phy_init, ++ .set_speed = aspeed_sgmii_phy_set_speed, ++ .exit = aspeed_sgmii_phy_exit, ++ .owner = THIS_MODULE, ++}; ++ ++static int aspeed_sgmii_probe(struct platform_device *pdev) ++{ ++ struct aspeed_sgmii *sgmii; ++ struct resource *res; ++ struct device *dev; ++ struct device_node *np; ++ struct phy_provider *provider; ++ struct phy *phy; ++ ++ dev = &pdev->dev; ++ ++ sgmii = devm_kzalloc(dev, sizeof(*sgmii), GFP_KERNEL); ++ if (!sgmii) ++ return -ENOMEM; ++ ++ sgmii->dev = dev; ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (!res) { ++ dev_err(dev, "cannot get resource\n"); ++ return -ENODEV; ++ } ++ ++ sgmii->regs = devm_ioremap_resource(dev, res); ++ if (IS_ERR(sgmii->regs)) { ++ dev_err(dev, "cannot map registers\n"); ++ return PTR_ERR(sgmii->regs); ++ } ++ ++ np = pdev->dev.of_node; ++ sgmii->plda_regmap = syscon_regmap_lookup_by_phandle(np, "aspeed,plda"); ++ if (IS_ERR(sgmii->plda_regmap)) { ++ dev_err(sgmii->dev, "Unable to find plda regmap (%ld)\n", ++ PTR_ERR(sgmii->plda_regmap)); ++ return PTR_ERR(sgmii->plda_regmap); ++ } ++ ++ phy = devm_phy_create(dev, NULL, &aspeed_sgmii_phyops); ++ if (IS_ERR(phy)) { ++ 
dev_err(&pdev->dev, "failed to create PHY\n"); ++ return PTR_ERR(phy); ++ } ++ ++ provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); ++ if (IS_ERR(provider)) ++ return PTR_ERR(provider); ++ ++ phy_set_drvdata(phy, sgmii); ++ ++ dev_info(dev, "module loaded\n"); ++ ++ return 0; ++} ++ ++static const struct of_device_id aspeed_sgmii_of_matches[] = { ++ { .compatible = "aspeed,ast2700-sgmii" }, ++ { }, ++}; ++ ++static struct platform_driver aspeed_sgmii_driver = { ++ .probe = aspeed_sgmii_probe, ++ .driver = { ++ .name = "aspeed-sgmii", ++ .of_match_table = aspeed_sgmii_of_matches, ++ }, ++}; ++ ++module_platform_driver(aspeed_sgmii_driver); ++ ++MODULE_AUTHOR("Jacky Chou "); ++MODULE_DESCRIPTION("Control of ASPEED SGMII Device"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/phy/aspeed/aspeed-usb-phy3.c b/drivers/phy/aspeed/aspeed-usb-phy3.c +--- a/drivers/phy/aspeed/aspeed-usb-phy3.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/phy/aspeed/aspeed-usb-phy3.c 2025-12-23 10:16:21.038034111 +0000 +@@ -0,0 +1,257 @@ ++// SPDX-License-Identifier: GPL-2.0+ ++/* ++ * Copyright 2023 Aspeed Technology Inc. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define PHY3P00_DEFAULT 0xCE70000F /* PHY PCS Protocol Setting #1 default value */ ++#define PHY3P04_DEFAULT 0x49C00014 /* PHY PCS Protocol Setting #2 default value */ ++#define PHY3P08_DEFAULT 0x5E406825 /* PHY PCS Protocol Setting #3 default value */ ++#define PHY3P0C_DEFAULT 0x00000001 /* PHY PCS Protocol Setting #4 default value */ ++ ++#define DWC_CRTL_NUM 3 ++ ++#define USB_PHY3_INIT_DONE BIT(15) /* BIT15: USB3.1 Phy internal SRAM initialization done */ ++#define USB_PHY3_SRAM_BYPASS BIT(7) /* USB3.1 Phy SRAM bypass */ ++#define USB_PHY3_SRAM_EXT_LOAD BIT(6) /* USB3.1 Phy SRAM external load done */ ++ ++struct aspeed_usb_phy3 { ++ struct device *dev; ++ void __iomem *regs; ++ const struct aspeed_usb_phy3_model *model; ++ bool phy_ext_load_quirk; ++}; ++ ++struct usb_dwc3_ctrl { ++ u32 offset; ++ u32 value; ++}; ++ ++struct aspeed_usb_phy3_model { ++ /* offsets to the PHY3 registers */ ++ unsigned int phy3s00; /* PHY SRAM Control/Status #1 */ ++ unsigned int phy3s04; /* PHY SRAM Control/Status #2 */ ++ unsigned int phy3c00; /* PHY PCS Control/Status #1 */ ++ unsigned int phy3c04; /* PHY PCS Control/Status #2 */ ++ unsigned int phy3p00; /* PHY PCS Protocol Setting #1 */ ++ unsigned int phy3p04; /* PHY PCS Protocol Setting #2 */ ++ unsigned int phy3p08; /* PHY PCS Protocol Setting #3 */ ++ unsigned int phy3p0c; /* PHY PCS Protocol Setting #4 */ ++ unsigned int dwc_cmd; /* DWC3 Commands base address offest */ ++}; ++ ++static struct usb_dwc3_ctrl ctrl_data[DWC_CRTL_NUM] = { ++ {0xc100, 0x00000006}, /* Set DWC3 GSBUSCFG0 for Bus Burst Type */ ++ {0xc12c, 0x0c854802}, /* Set DWC3 GUCTL for ref_clk */ ++ {0xc630, 0x0c800020}, /* Set DWC3 GLADJ for ref_clk */ ++}; ++ ++static const struct aspeed_usb_phy3_model ast2700a0_model = { ++ .phy3s00 = 0x800, ++ .phy3s04 = 0x804, ++ .phy3c00 = 0x808, ++ .phy3c04 = 0x80C, ++ .phy3p00 = 0x810, ++ .phy3p04 = 0x814, ++ .phy3p08 = 0x818, ++ .phy3p0c = 0x81C, ++ .dwc_cmd = 0xB80, ++}; ++ ++static const struct aspeed_usb_phy3_model ast2700_model = { ++ .phy3s00 = 0x00, ++ .phy3s04 = 0x04, ++ .phy3c00 = 0x08, ++ .phy3c04 = 0x0C, ++ .phy3p00 = 0x10, ++ .phy3p04 = 0x14, ++ .phy3p08 = 0x18, ++ .phy3p0c = 0x1C, ++ .dwc_cmd = 0x40, ++}; ++ ++static const struct of_device_id aspeed_usb_phy3_dt_ids[] = { ++ { ++ 
.compatible = "aspeed,ast2700-a0-uhy3a", ++ .data = &ast2700a0_model ++ }, ++ { ++ .compatible = "aspeed,ast2700-a0-uhy3b", ++ .data = &ast2700a0_model ++ }, ++ { ++ .compatible = "aspeed,ast2700-uphy3a", ++ .data = &ast2700_model ++ }, ++ { ++ .compatible = "aspeed,ast2700-uphy3b", ++ .data = &ast2700_model ++ }, ++ { } ++}; ++MODULE_DEVICE_TABLE(of, aspeed_usb_phy3_dt_ids); ++ ++static int aspeed_usb_phy3_init(struct phy *phy) ++{ ++ struct aspeed_usb_phy3 *phy3 = phy_get_drvdata(phy); ++ const struct aspeed_usb_phy3_model *model = phy3->model; ++ u32 val; ++ int timeout = 100; ++ int i, j; ++ ++ while ((readl(phy3->regs + model->phy3s00) & USB_PHY3_INIT_DONE) ++ != USB_PHY3_INIT_DONE) { ++ usleep_range(100, 110); ++ if (--timeout == 0) { ++ dev_err(phy3->dev, "Wait phy3 init timed out\n"); ++ return -ETIMEDOUT; ++ } ++ } ++ ++ val = readl(phy3->regs + model->phy3s00); ++ ++ if (phy3->phy_ext_load_quirk) ++ val |= USB_PHY3_SRAM_EXT_LOAD; ++ else ++ val |= USB_PHY3_SRAM_BYPASS; ++ writel(val, phy3->regs + model->phy3s00); ++ ++ /* Set protocol1_ext signals as default PHY3 settings based on SNPS documents. ++ * Including PCFGI[54]: protocol1_ext_rx_los_lfps_en for better compatibility ++ */ ++ writel(PHY3P00_DEFAULT, phy3->regs + model->phy3p00); ++ writel(PHY3P04_DEFAULT, phy3->regs + model->phy3p04); ++ writel(PHY3P08_DEFAULT, phy3->regs + model->phy3p08); ++ writel(PHY3P0C_DEFAULT, phy3->regs + model->phy3p0c); ++ ++ /* xHCI DWC specific command initially set when PCIe xHCI enable */ ++ for (i = 0, j = model->dwc_cmd; i < DWC_CRTL_NUM; i++) { ++ /* 48-bits Command: ++ * CMD1: Data -> DWC CMD [31:0], Address -> DWC CMD [47:32] ++ * CMD2: Data -> DWC CMD [79:48], Address -> DWC CMD [95:80] ++ * ... and etc. ++ */ ++ if (i % 2 == 0) { ++ writel(ctrl_data[i].value, phy3->regs + j); ++ j += 4; ++ ++ writel(ctrl_data[i].offset & 0xFFFF, phy3->regs + j); ++ } else { ++ val = readl(phy3->regs + j) & 0xFFFF; ++ val |= ((ctrl_data[i].value & 0xFFFF) << 16); ++ writel(val, phy3->regs + j); ++ j += 4; ++ ++ val = (ctrl_data[i].offset << 16) | (ctrl_data[i].value >> 16); ++ writel(val, phy3->regs + j); ++ j += 4; ++ } ++ } ++ ++ dev_info(phy3->dev, "Initialized USB PHY3\n"); ++ return 0; ++} ++ ++static const struct phy_ops aspeed_usb_phy3_phyops = { ++ .init = aspeed_usb_phy3_init, ++ .owner = THIS_MODULE, ++}; ++ ++static int aspeed_usb_phy3_probe(struct platform_device *pdev) ++{ ++ struct aspeed_usb_phy3 *phy3; ++ struct device *dev; ++ struct phy_provider *provider; ++ struct phy *phy; ++ struct device_node *node = pdev->dev.of_node; ++ struct clk *clk; ++ struct reset_control *rst; ++ int rc = 0; ++ ++ dev = &pdev->dev; ++ ++ phy3 = devm_kzalloc(dev, sizeof(*phy3), GFP_KERNEL); ++ if (!phy3) ++ return -ENOMEM; ++ ++ phy3->dev = dev; ++ ++ phy3->model = of_device_get_match_data(dev); ++ if (IS_ERR(phy3->model)) { ++ dev_err(dev, "Couldn't get model data\n"); ++ return -ENODEV; ++ } ++ ++ clk = devm_clk_get(dev, NULL); ++ if (IS_ERR(clk)) ++ return PTR_ERR(clk); ++ ++ rc = clk_prepare_enable(clk); ++ if (rc) { ++ dev_err(dev, "Unable to enable clock (%d)\n", rc); ++ return rc; ++ } ++ ++ rst = devm_reset_control_get_shared(dev, NULL); ++ if (IS_ERR(rst)) { ++ rc = PTR_ERR(rst); ++ goto err; ++ } ++ rc = reset_control_deassert(rst); ++ if (rc) ++ goto err; ++ ++ phy3->regs = of_iomap(node, 0); ++ ++ phy3->phy_ext_load_quirk = ++ device_property_read_bool(dev, "aspeed,phy_ext_load_quirk"); ++ ++ phy = devm_phy_create(dev, NULL, &aspeed_usb_phy3_phyops); ++ if (IS_ERR(phy)) { ++ dev_err(dev, 
"failed to create PHY\n"); ++ return PTR_ERR(phy); ++ } ++ ++ provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); ++ if (IS_ERR(provider)) ++ return PTR_ERR(provider); ++ ++ phy_set_drvdata(phy, phy3); ++ ++ dev_info(phy3->dev, "Probed USB PHY3\n"); ++ ++ return 0; ++ ++err: ++ if (clk) ++ clk_disable_unprepare(clk); ++ return rc; ++} ++ ++static void aspeed_usb_phy3_remove(struct platform_device *pdev) ++{ ++} ++ ++static struct platform_driver aspeed_usb_phy3_driver = { ++ .probe = aspeed_usb_phy3_probe, ++ .remove = aspeed_usb_phy3_remove, ++ .driver = { ++ .name = KBUILD_MODNAME, ++ .of_match_table = aspeed_usb_phy3_dt_ids, ++ }, ++}; ++module_platform_driver(aspeed_usb_phy3_driver); ++ ++MODULE_LICENSE("GPL"); ++MODULE_AUTHOR("Joe Wang "); +diff --git a/drivers/pinctrl/aspeed/Kconfig b/drivers/pinctrl/aspeed/Kconfig +--- a/drivers/pinctrl/aspeed/Kconfig 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/pinctrl/aspeed/Kconfig 2025-12-23 10:16:21.135032485 +0000 +@@ -31,3 +31,11 @@ + help + Say Y here to enable pin controller support for Aspeed's 6th + generation SoCs. GPIO is provided by a separate GPIO driver. ++ ++config PINCTRL_ASPEED_G7 ++ bool "Aspeed G7 SoC pin control" ++ depends on (ARCH_ASPEED || COMPILE_TEST) && OF ++ select PINCTRL_ASPEED ++ help ++ Say Y here to enable pin controller support for Aspeed's 7th ++ generation SoCs. GPIO is provided by a separate GPIO driver. +diff --git a/drivers/pinctrl/aspeed/Makefile b/drivers/pinctrl/aspeed/Makefile +--- a/drivers/pinctrl/aspeed/Makefile 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/pinctrl/aspeed/Makefile 2025-12-23 10:16:21.135032485 +0000 +@@ -6,3 +6,4 @@ + obj-$(CONFIG_PINCTRL_ASPEED_G4) += pinctrl-aspeed-g4.o + obj-$(CONFIG_PINCTRL_ASPEED_G5) += pinctrl-aspeed-g5.o + obj-$(CONFIG_PINCTRL_ASPEED_G6) += pinctrl-aspeed-g6.o ++obj-$(CONFIG_PINCTRL_ASPEED_G7) += pinctrl-aspeed-g7-soc0.o pinctrl-aspeed-g7-soc1.o pinctrl-aspeed-g7-ltpi.o +\ No newline at end of file +diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c +--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c 2025-12-23 10:16:21.136032468 +0000 +@@ -17,6 +17,8 @@ + #include "../pinctrl-utils.h" + #include "pinctrl-aspeed.h" + ++#define SCU040 0x040 /* Reset Control Set 1 */ ++#define SCU0C8 0x0C8 /* Debug Control */ + #define SCU400 0x400 /* Multi-function Pin Control #1 */ + #define SCU404 0x404 /* Multi-function Pin Control #2 */ + #define SCU40C 0x40C /* Multi-function Pin Control #3 */ +@@ -31,6 +33,7 @@ + #define SCU450 0x450 /* Multi-function Pin Control #14 */ + #define SCU454 0x454 /* Multi-function Pin Control #15 */ + #define SCU458 0x458 /* Multi-function Pin Control #16 */ ++#define SCU470 0x470 + #define SCU4B0 0x4B0 /* Multi-function Pin Control #17 */ + #define SCU4B4 0x4B4 /* Multi-function Pin Control #18 */ + #define SCU4B8 0x4B8 /* Multi-function Pin Control #19 */ +@@ -46,13 +49,16 @@ + #define SCU630 0x630 /* Disable GPIO Internal Pull-Down #4 */ + #define SCU634 0x634 /* Disable GPIO Internal Pull-Down #5 */ + #define SCU638 0x638 /* Disable GPIO Internal Pull-Down #6 */ ++#define SCU650 0x650 /* Driving Strength */ + #define SCU690 0x690 /* Multi-function Pin Control #24 */ + #define SCU694 0x694 /* Multi-function Pin Control #25 */ ++#define SCU698 0x698 /* Multi-function Pin Control #26 */ + #define SCU69C 0x69C /* Multi-function Pin Control #27 */ + #define SCU6D0 0x6D0 /* Multi-function 
Pin Control #29 */ + #define SCUC20 0xC20 /* PCIE configuration Setting Control */ ++#define SCUC24 0xC24 /* BMC MMIO Decode Setting */ + +-#define ASPEED_G6_NR_PINS 256 ++#define ASPEED_G6_NR_PINS 258 + + #define M24 0 + SIG_EXPR_LIST_DECL_SESG(M24, MDC3, MDIO3, SIG_DESC_SET(SCU410, 0)); +@@ -171,81 +177,93 @@ + + #define H24 16 + SIG_EXPR_LIST_DECL_SESG(H24, RGMII3TXCK, RGMII3, SIG_DESC_SET(SCU410, 16), +- SIG_DESC_SET(SCU510, 0)); ++ SIG_DESC_SET(SCU510, 0)); + SIG_EXPR_LIST_DECL_SESG(H24, RMII3RCLKO, RMII3, SIG_DESC_SET(SCU410, 16), +- SIG_DESC_CLEAR(SCU510, 0)); +-PIN_DECL_2(H24, GPIOC0, RGMII3TXCK, RMII3RCLKO); ++ SIG_DESC_CLEAR(SCU510, 0)); ++SIG_EXPR_LIST_DECL_SESG(H24, VPA_B0, VPA, SIG_DESC_CLEAR(SCU410, 16)); ++PIN_DECL_3(H24, GPIOC0, RGMII3TXCK, RMII3RCLKO, VPA_B0); + + #define J22 17 + SIG_EXPR_LIST_DECL_SESG(J22, RGMII3TXCTL, RGMII3, SIG_DESC_SET(SCU410, 17), +- SIG_DESC_SET(SCU510, 0)); ++ SIG_DESC_SET(SCU510, 0)); + SIG_EXPR_LIST_DECL_SESG(J22, RMII3TXEN, RMII3, SIG_DESC_SET(SCU410, 17), +- SIG_DESC_CLEAR(SCU510, 0)); +-PIN_DECL_2(J22, GPIOC1, RGMII3TXCTL, RMII3TXEN); ++ SIG_DESC_CLEAR(SCU510, 0)); ++SIG_EXPR_LIST_DECL_SESG(J22, VPA_B1, VPA, SIG_DESC_CLEAR(SCU410, 17)); ++PIN_DECL_3(J22, GPIOC1, RGMII3TXCTL, RMII3TXEN, VPA_B1); + + #define H22 18 + SIG_EXPR_LIST_DECL_SESG(H22, RGMII3TXD0, RGMII3, SIG_DESC_SET(SCU410, 18), +- SIG_DESC_SET(SCU510, 0)); ++ SIG_DESC_SET(SCU510, 0)); + SIG_EXPR_LIST_DECL_SESG(H22, RMII3TXD0, RMII3, SIG_DESC_SET(SCU410, 18), +- SIG_DESC_CLEAR(SCU510, 0)); +-PIN_DECL_2(H22, GPIOC2, RGMII3TXD0, RMII3TXD0); ++ SIG_DESC_CLEAR(SCU510, 0)); ++SIG_EXPR_LIST_DECL_SESG(H22, VPA_B2, VPA, SIG_DESC_CLEAR(SCU410, 18)); ++PIN_DECL_3(H22, GPIOC2, RGMII3TXD0, RMII3TXD0, VPA_B2); + + #define H23 19 + SIG_EXPR_LIST_DECL_SESG(H23, RGMII3TXD1, RGMII3, SIG_DESC_SET(SCU410, 19), +- SIG_DESC_SET(SCU510, 0)); ++ SIG_DESC_SET(SCU510, 0)); + SIG_EXPR_LIST_DECL_SESG(H23, RMII3TXD1, RMII3, SIG_DESC_SET(SCU410, 19), +- SIG_DESC_CLEAR(SCU510, 0)); +-PIN_DECL_2(H23, GPIOC3, RGMII3TXD1, RMII3TXD1); ++ SIG_DESC_CLEAR(SCU510, 0)); ++SIG_EXPR_LIST_DECL_SESG(H23, VPA_B3, VPA, SIG_DESC_CLEAR(SCU410, 19)); ++PIN_DECL_3(H23, GPIOC3, RGMII3TXD1, RMII3TXD1, VPA_B3); + + #define G22 20 + SIG_EXPR_LIST_DECL_SESG(G22, RGMII3TXD2, RGMII3, SIG_DESC_SET(SCU410, 20), +- SIG_DESC_SET(SCU510, 0)); +-PIN_DECL_1(G22, GPIOC4, RGMII3TXD2); ++ SIG_DESC_SET(SCU510, 0)); ++SIG_EXPR_LIST_DECL_SESG(G22, VPA_B4, VPA, SIG_DESC_CLEAR(SCU410, 20)); ++PIN_DECL_2(G22, GPIOC4, RGMII3TXD2, VPA_B4); + + #define F22 21 + SIG_EXPR_LIST_DECL_SESG(F22, RGMII3TXD3, RGMII3, SIG_DESC_SET(SCU410, 21), +- SIG_DESC_SET(SCU510, 0)); +-PIN_DECL_1(F22, GPIOC5, RGMII3TXD3); ++ SIG_DESC_SET(SCU510, 0)); ++SIG_EXPR_LIST_DECL_SESG(F22, VPA_B5, VPA, SIG_DESC_CLEAR(SCU410, 21)); ++PIN_DECL_2(F22, GPIOC5, RGMII3TXD3, VPA_B5); + + #define G23 22 + SIG_EXPR_LIST_DECL_SESG(G23, RGMII3RXCK, RGMII3, SIG_DESC_SET(SCU410, 22), +- SIG_DESC_SET(SCU510, 0)); ++ SIG_DESC_SET(SCU510, 0)); + SIG_EXPR_LIST_DECL_SESG(G23, RMII3RCLKI, RMII3, SIG_DESC_SET(SCU410, 22), +- SIG_DESC_CLEAR(SCU510, 0)); +-PIN_DECL_2(G23, GPIOC6, RGMII3RXCK, RMII3RCLKI); ++ SIG_DESC_CLEAR(SCU510, 0)); ++SIG_EXPR_LIST_DECL_SESG(G23, VPAPCLK, VPA, SIG_DESC_CLEAR(SCU410, 22)); ++PIN_DECL_3(G23, GPIOC6, RGMII3RXCK, RMII3RCLKI, VPAPCLK); + + #define G24 23 + SIG_EXPR_LIST_DECL_SESG(G24, RGMII3RXCTL, RGMII3, SIG_DESC_SET(SCU410, 23), +- SIG_DESC_SET(SCU510, 0)); +-PIN_DECL_1(G24, GPIOC7, RGMII3RXCTL); ++ SIG_DESC_SET(SCU510, 0)); ++SIG_EXPR_LIST_DECL_SESG(G24, VPA_B6, VPA, 
SIG_DESC_CLEAR(SCU410, 23)); ++PIN_DECL_2(G24, GPIOC7, RGMII3RXCTL, VPA_B6); + + #define F23 24 + SIG_EXPR_LIST_DECL_SESG(F23, RGMII3RXD0, RGMII3, SIG_DESC_SET(SCU410, 24), +- SIG_DESC_SET(SCU510, 0)); ++ SIG_DESC_SET(SCU510, 0)); + SIG_EXPR_LIST_DECL_SESG(F23, RMII3RXD0, RMII3, SIG_DESC_SET(SCU410, 24), +- SIG_DESC_CLEAR(SCU510, 0)); +-PIN_DECL_2(F23, GPIOD0, RGMII3RXD0, RMII3RXD0); ++ SIG_DESC_CLEAR(SCU510, 0)); ++SIG_EXPR_LIST_DECL_SESG(F23, VPA_B7, VPA, SIG_DESC_CLEAR(SCU410, 24)); ++PIN_DECL_3(F23, GPIOD0, RGMII3RXD0, RMII3RXD0, VPA_B7); + + #define F26 25 + SIG_EXPR_LIST_DECL_SESG(F26, RGMII3RXD1, RGMII3, SIG_DESC_SET(SCU410, 25), +- SIG_DESC_SET(SCU510, 0)); ++ SIG_DESC_SET(SCU510, 0)); + SIG_EXPR_LIST_DECL_SESG(F26, RMII3RXD1, RMII3, SIG_DESC_SET(SCU410, 25), +- SIG_DESC_CLEAR(SCU510, 0)); +-PIN_DECL_2(F26, GPIOD1, RGMII3RXD1, RMII3RXD1); ++ SIG_DESC_CLEAR(SCU510, 0)); ++SIG_EXPR_LIST_DECL_SESG(F26, VPA_G0, VPA, SIG_DESC_CLEAR(SCU410, 25)); ++PIN_DECL_3(F26, GPIOD1, RGMII3RXD1, RMII3RXD1, VPA_G0); + + #define F25 26 + SIG_EXPR_LIST_DECL_SESG(F25, RGMII3RXD2, RGMII3, SIG_DESC_SET(SCU410, 26), +- SIG_DESC_SET(SCU510, 0)); ++ SIG_DESC_SET(SCU510, 0)); + SIG_EXPR_LIST_DECL_SESG(F25, RMII3CRSDV, RMII3, SIG_DESC_SET(SCU410, 26), +- SIG_DESC_CLEAR(SCU510, 0)); +-PIN_DECL_2(F25, GPIOD2, RGMII3RXD2, RMII3CRSDV); ++ SIG_DESC_CLEAR(SCU510, 0)); ++SIG_EXPR_LIST_DECL_SESG(F25, VPA_G1, VPA, SIG_DESC_CLEAR(SCU410, 26)); ++PIN_DECL_3(F25, GPIOD2, RGMII3RXD2, RMII3CRSDV, VPA_G1); + + #define E26 27 + SIG_EXPR_LIST_DECL_SESG(E26, RGMII3RXD3, RGMII3, SIG_DESC_SET(SCU410, 27), +- SIG_DESC_SET(SCU510, 0)); ++ SIG_DESC_SET(SCU510, 0)); + SIG_EXPR_LIST_DECL_SESG(E26, RMII3RXER, RMII3, SIG_DESC_SET(SCU410, 27), +- SIG_DESC_CLEAR(SCU510, 0)); +-PIN_DECL_2(E26, GPIOD3, RGMII3RXD3, RMII3RXER); ++ SIG_DESC_CLEAR(SCU510, 0)); ++SIG_EXPR_LIST_DECL_SESG(E26, VPA_G2, VPA, SIG_DESC_CLEAR(SCU410, 27)); ++PIN_DECL_3(E26, GPIOD3, RGMII3RXD3, RMII3RXER, VPA_G2); + + FUNC_GROUP_DECL(RGMII3, H24, J22, H22, H23, G22, F22, G23, G24, F23, F26, F25, + E26); +@@ -259,7 +277,9 @@ + SIG_DESC_SET(SCU510, 1)); + SIG_EXPR_LIST_DECL_SESG(F24, RMII4RCLKO, RMII4, SIG_DESC_SET(SCU4B0, 28), + SIG_DESC_CLEAR(SCU510, 1)); +-PIN_DECL_3(F24, GPIOD4, NCTS3, RGMII4TXCK, RMII4RCLKO); ++SIG_EXPR_LIST_DECL_SESG(F24, VPA_G3, VPA, SIG_DESC_CLEAR(SCU410, 28), ++ SIG_DESC_CLEAR(SCU4B0, 28)); ++PIN_DECL_4(F24, GPIOD4, NCTS3, RGMII4TXCK, RMII4RCLKO, VPA_G3); + FUNC_GROUP_DECL(NCTS3, F24); + + #define E23 29 +@@ -268,7 +288,9 @@ + SIG_DESC_SET(SCU510, 1)); + SIG_EXPR_LIST_DECL_SESG(E23, RMII4TXEN, RMII4, SIG_DESC_SET(SCU4B0, 29), + SIG_DESC_CLEAR(SCU510, 1)); +-PIN_DECL_3(E23, GPIOD5, NDCD3, RGMII4TXCTL, RMII4TXEN); ++SIG_EXPR_LIST_DECL_SESG(E23, VPA_G4, VPA, SIG_DESC_CLEAR(SCU410, 29), ++ SIG_DESC_CLEAR(SCU4B0, 29)); ++PIN_DECL_4(E23, GPIOD5, NDCD3, RGMII4TXCTL, RMII4TXEN, VPA_G4); + FUNC_GROUP_DECL(NDCD3, E23); + + #define E24 30 +@@ -277,7 +299,9 @@ + SIG_DESC_SET(SCU510, 1)); + SIG_EXPR_LIST_DECL_SESG(E24, RMII4TXD0, RMII4, SIG_DESC_SET(SCU4B0, 30), + SIG_DESC_CLEAR(SCU510, 1)); +-PIN_DECL_3(E24, GPIOD6, NDSR3, RGMII4TXD0, RMII4TXD0); ++SIG_EXPR_LIST_DECL_SESG(E24, VPA_G5, VPA, SIG_DESC_CLEAR(SCU410, 30), ++ SIG_DESC_CLEAR(SCU4B0, 30)); ++PIN_DECL_4(E24, GPIOD6, NDSR3, RGMII4TXD0, RMII4TXD0, VPA_G5); + FUNC_GROUP_DECL(NDSR3, E24); + + #define E25 31 +@@ -286,73 +310,99 @@ + SIG_DESC_SET(SCU510, 1)); + SIG_EXPR_LIST_DECL_SESG(E25, RMII4TXD1, RMII4, SIG_DESC_SET(SCU4B0, 31), + SIG_DESC_CLEAR(SCU510, 1)); +-PIN_DECL_3(E25, GPIOD7, NRI3, RGMII4TXD1, 
RMII4TXD1); ++SIG_EXPR_LIST_DECL_SESG(E25, VPA_G6, VPA, SIG_DESC_CLEAR(SCU410, 31), ++ SIG_DESC_CLEAR(SCU4B0, 31)); ++PIN_DECL_4(E25, GPIOD7, NRI3, RGMII4TXD1, RMII4TXD1, VPA_G6); + FUNC_GROUP_DECL(NRI3, E25); + + #define D26 32 +-SIG_EXPR_LIST_DECL_SESG(D26, NDTR3, NDTR3, SIG_DESC_SET(SCU414, 0)); ++SIG_EXPR_LIST_DECL_SESG(D26, NDTR3, NDTR3, SIG_DESC_SET(SCU414, 0), ++ SIG_DESC_CLEAR(SCU470, 16)); + SIG_EXPR_LIST_DECL_SESG(D26, RGMII4TXD2, RGMII4, SIG_DESC_SET(SCU4B4, 0), +- SIG_DESC_SET(SCU510, 1)); +-PIN_DECL_2(D26, GPIOE0, NDTR3, RGMII4TXD2); ++ SIG_DESC_CLEAR(SCU470, 16), SIG_DESC_SET(SCU510, 1)); ++SIG_EXPR_LIST_DECL_SESG(D26, VPA_G7, VPA, SIG_DESC_CLEAR(SCU414, 0), ++ SIG_DESC_CLEAR(SCU4B4, 0)); ++PIN_DECL_3(D26, GPIOE0, NDTR3, RGMII4TXD2, VPA_G7); + FUNC_GROUP_DECL(NDTR3, D26); + + #define D24 33 +-SIG_EXPR_LIST_DECL_SESG(D24, NRTS3, NRTS3, SIG_DESC_SET(SCU414, 1)); ++SIG_EXPR_LIST_DECL_SESG(D24, NRTS3, NRTS3, SIG_DESC_SET(SCU414, 1), ++ SIG_DESC_CLEAR(SCU470, 17)); + SIG_EXPR_LIST_DECL_SESG(D24, RGMII4TXD3, RGMII4, SIG_DESC_SET(SCU4B4, 1), +- SIG_DESC_SET(SCU510, 1)); +-PIN_DECL_2(D24, GPIOE1, NRTS3, RGMII4TXD3); ++ SIG_DESC_CLEAR(SCU470, 17), SIG_DESC_SET(SCU510, 1)); ++SIG_EXPR_LIST_DECL_SESG(D24, VPA_R0, VPA, SIG_DESC_CLEAR(SCU414, 1), ++ SIG_DESC_CLEAR(SCU4B4, 1)); ++PIN_DECL_3(D24, GPIOE1, NRTS3, RGMII4TXD3, VPA_R0); + FUNC_GROUP_DECL(NRTS3, D24); + + #define C25 34 +-SIG_EXPR_LIST_DECL_SESG(C25, NCTS4, NCTS4, SIG_DESC_SET(SCU414, 2)); ++SIG_EXPR_LIST_DECL_SESG(C25, NCTS4, NCTS4, SIG_DESC_SET(SCU414, 2), ++ SIG_DESC_CLEAR(SCU470, 18)); + SIG_EXPR_LIST_DECL_SESG(C25, RGMII4RXCK, RGMII4, SIG_DESC_SET(SCU4B4, 2), +- SIG_DESC_SET(SCU510, 1)); ++ SIG_DESC_CLEAR(SCU470, 18), SIG_DESC_SET(SCU510, 1)); + SIG_EXPR_LIST_DECL_SESG(C25, RMII4RCLKI, RMII4, SIG_DESC_SET(SCU4B4, 2), +- SIG_DESC_CLEAR(SCU510, 1)); +-PIN_DECL_3(C25, GPIOE2, NCTS4, RGMII4RXCK, RMII4RCLKI); ++ SIG_DESC_CLEAR(SCU470, 18), SIG_DESC_CLEAR(SCU510, 1)); ++SIG_EXPR_LIST_DECL_SESG(C25, VPA_R1, VPA, SIG_DESC_CLEAR(SCU414, 2), ++ SIG_DESC_CLEAR(SCU4B4, 2)); ++PIN_DECL_4(C25, GPIOE2, NCTS4, RGMII4RXCK, RMII4RCLKI, VPA_R1); + FUNC_GROUP_DECL(NCTS4, C25); + + #define C26 35 +-SIG_EXPR_LIST_DECL_SESG(C26, NDCD4, NDCD4, SIG_DESC_SET(SCU414, 3)); ++SIG_EXPR_LIST_DECL_SESG(C26, NDCD4, NDCD4, SIG_DESC_SET(SCU414, 3), ++ SIG_DESC_CLEAR(SCU470, 19)); + SIG_EXPR_LIST_DECL_SESG(C26, RGMII4RXCTL, RGMII4, SIG_DESC_SET(SCU4B4, 3), +- SIG_DESC_SET(SCU510, 1)); +-PIN_DECL_2(C26, GPIOE3, NDCD4, RGMII4RXCTL); ++ SIG_DESC_CLEAR(SCU470, 19), SIG_DESC_SET(SCU510, 1)); ++SIG_EXPR_LIST_DECL_SESG(C26, VPA_R2, VPA, SIG_DESC_CLEAR(SCU414, 3), ++ SIG_DESC_CLEAR(SCU4B4, 3)); ++PIN_DECL_3(C26, GPIOE3, NDCD4, RGMII4RXCTL, VPA_R2); + FUNC_GROUP_DECL(NDCD4, C26); + + #define C24 36 +-SIG_EXPR_LIST_DECL_SESG(C24, NDSR4, NDSR4, SIG_DESC_SET(SCU414, 4)); ++SIG_EXPR_LIST_DECL_SESG(C24, NDSR4, NDSR4, SIG_DESC_SET(SCU414, 4), ++ SIG_DESC_CLEAR(SCU470, 20)); + SIG_EXPR_LIST_DECL_SESG(C24, RGMII4RXD0, RGMII4, SIG_DESC_SET(SCU4B4, 4), +- SIG_DESC_SET(SCU510, 1)); ++ SIG_DESC_CLEAR(SCU470, 20), SIG_DESC_SET(SCU510, 1)); + SIG_EXPR_LIST_DECL_SESG(C24, RMII4RXD0, RMII4, SIG_DESC_SET(SCU4B4, 4), +- SIG_DESC_CLEAR(SCU510, 1)); +-PIN_DECL_3(C24, GPIOE4, NDSR4, RGMII4RXD0, RMII4RXD0); ++ SIG_DESC_CLEAR(SCU470, 20), SIG_DESC_CLEAR(SCU510, 1)); ++SIG_EXPR_LIST_DECL_SESG(C24, VPA_R3, VPA, SIG_DESC_CLEAR(SCU414, 4), ++ SIG_DESC_CLEAR(SCU4B4, 4)); ++PIN_DECL_4(C24, GPIOE4, NDSR4, RGMII4RXD0, RMII4RXD0, VPA_R3); + FUNC_GROUP_DECL(NDSR4, C24); + + #define B26 37 
+-SIG_EXPR_LIST_DECL_SESG(B26, NRI4, NRI4, SIG_DESC_SET(SCU414, 5)); ++SIG_EXPR_LIST_DECL_SESG(B26, NRI4, NRI4, SIG_DESC_SET(SCU414, 5), ++ SIG_DESC_CLEAR(SCU470, 21)); + SIG_EXPR_LIST_DECL_SESG(B26, RGMII4RXD1, RGMII4, SIG_DESC_SET(SCU4B4, 5), +- SIG_DESC_SET(SCU510, 1)); ++ SIG_DESC_CLEAR(SCU470, 21), SIG_DESC_SET(SCU510, 1)); + SIG_EXPR_LIST_DECL_SESG(B26, RMII4RXD1, RMII4, SIG_DESC_SET(SCU4B4, 5), +- SIG_DESC_CLEAR(SCU510, 1)); +-PIN_DECL_3(B26, GPIOE5, NRI4, RGMII4RXD1, RMII4RXD1); ++ SIG_DESC_CLEAR(SCU470, 21), SIG_DESC_CLEAR(SCU510, 1)); ++SIG_EXPR_LIST_DECL_SESG(B26, VPA_R4, VPA, SIG_DESC_CLEAR(SCU414, 5), ++ SIG_DESC_CLEAR(SCU4B4, 5)); ++PIN_DECL_4(B26, GPIOE5, NRI4, RGMII4RXD1, RMII4RXD1, VPA_R4); + FUNC_GROUP_DECL(NRI4, B26); + + #define B25 38 +-SIG_EXPR_LIST_DECL_SESG(B25, NDTR4, NDTR4, SIG_DESC_SET(SCU414, 6)); ++SIG_EXPR_LIST_DECL_SESG(B25, NDTR4, NDTR4, SIG_DESC_SET(SCU414, 6), ++ SIG_DESC_CLEAR(SCU470, 22)); + SIG_EXPR_LIST_DECL_SESG(B25, RGMII4RXD2, RGMII4, SIG_DESC_SET(SCU4B4, 6), +- SIG_DESC_SET(SCU510, 1)); ++ SIG_DESC_CLEAR(SCU470, 22), SIG_DESC_SET(SCU510, 1)); + SIG_EXPR_LIST_DECL_SESG(B25, RMII4CRSDV, RMII4, SIG_DESC_SET(SCU4B4, 6), +- SIG_DESC_CLEAR(SCU510, 1)); +-PIN_DECL_3(B25, GPIOE6, NDTR4, RGMII4RXD2, RMII4CRSDV); ++ SIG_DESC_CLEAR(SCU470, 22), SIG_DESC_CLEAR(SCU510, 1)); ++SIG_EXPR_LIST_DECL_SESG(B25, VPA_R5, VPA, SIG_DESC_CLEAR(SCU414, 6), ++ SIG_DESC_CLEAR(SCU4B4, 6)); ++PIN_DECL_4(B25, GPIOE6, NDTR4, RGMII4RXD2, RMII4CRSDV, VPA_R5); + FUNC_GROUP_DECL(NDTR4, B25); + + #define B24 39 +-SIG_EXPR_LIST_DECL_SESG(B24, NRTS4, NRTS4, SIG_DESC_SET(SCU414, 7)); ++SIG_EXPR_LIST_DECL_SESG(B24, NRTS4, NRTS4, SIG_DESC_SET(SCU414, 7), ++ SIG_DESC_CLEAR(SCU470, 23)); + SIG_EXPR_LIST_DECL_SESG(B24, RGMII4RXD3, RGMII4, SIG_DESC_SET(SCU4B4, 7), +- SIG_DESC_SET(SCU510, 1)); ++ SIG_DESC_CLEAR(SCU470, 23), SIG_DESC_SET(SCU510, 1)); + SIG_EXPR_LIST_DECL_SESG(B24, RMII4RXER, RMII4, SIG_DESC_SET(SCU4B4, 7), +- SIG_DESC_CLEAR(SCU510, 1)); +-PIN_DECL_3(B24, GPIOE7, NRTS4, RGMII4RXD3, RMII4RXER); ++ SIG_DESC_CLEAR(SCU470, 23), SIG_DESC_CLEAR(SCU510, 1)); ++SIG_EXPR_LIST_DECL_SESG(B24, VPA_R6, VPA, SIG_DESC_CLEAR(SCU414, 7), ++ SIG_DESC_CLEAR(SCU4B4, 7)); ++PIN_DECL_4(B24, GPIOE7, NRTS4, RGMII4RXD3, RMII4RXER, VPA_R6); + FUNC_GROUP_DECL(NRTS4, B24); + + FUNC_GROUP_DECL(RGMII4, F24, E23, E24, E25, D26, D24, C25, C26, C24, B26, B25, +@@ -364,27 +414,39 @@ + #define D22 40 + SIG_EXPR_LIST_DECL_SESG(D22, SD1CLK, SD1, SIG_DESC_SET(SCU414, 8)); + SIG_EXPR_LIST_DECL_SEMG(D22, PWM8, PWM8G0, PWM8, SIG_DESC_SET(SCU4B4, 8)); +-PIN_DECL_2(D22, GPIOF0, SD1CLK, PWM8); ++SIG_EXPR_LIST_DECL_SESG(D22, VPA_R7, VPA, SIG_DESC_CLEAR(SCU414, 8), ++ SIG_DESC_CLEAR(SCU4B4, 8)); ++PIN_DECL_3(D22, GPIOF0, SD1CLK, PWM8, VPA_R7); + GROUP_DECL(PWM8G0, D22); + + #define E22 41 + SIG_EXPR_LIST_DECL_SESG(E22, SD1CMD, SD1, SIG_DESC_SET(SCU414, 9)); + SIG_EXPR_LIST_DECL_SEMG(E22, PWM9, PWM9G0, PWM9, SIG_DESC_SET(SCU4B4, 9)); +-PIN_DECL_2(E22, GPIOF1, SD1CMD, PWM9); ++SIG_EXPR_LIST_DECL_SESG(E22, VPAHS, VPA, SIG_DESC_CLEAR(SCU414, 9), ++ SIG_DESC_CLEAR(SCU4B4, 9)); ++PIN_DECL_3(E22, GPIOF1, SD1CMD, PWM9, VPAHS); + GROUP_DECL(PWM9G0, E22); + + #define D23 42 + SIG_EXPR_LIST_DECL_SESG(D23, SD1DAT0, SD1, SIG_DESC_SET(SCU414, 10)); + SIG_EXPR_LIST_DECL_SEMG(D23, PWM10, PWM10G0, PWM10, SIG_DESC_SET(SCU4B4, 10)); +-PIN_DECL_2(D23, GPIOF2, SD1DAT0, PWM10); ++SIG_EXPR_LIST_DECL_SESG(D23, VPAVS, VPA, SIG_DESC_CLEAR(SCU414, 10), ++ SIG_DESC_CLEAR(SCU4B4, 10)); ++PIN_DECL_3(D23, GPIOF2, SD1DAT0, PWM10, VPAVS); + 
GROUP_DECL(PWM10G0, D23); + + #define C23 43 + SIG_EXPR_LIST_DECL_SESG(C23, SD1DAT1, SD1, SIG_DESC_SET(SCU414, 11)); + SIG_EXPR_LIST_DECL_SEMG(C23, PWM11, PWM11G0, PWM11, SIG_DESC_SET(SCU4B4, 11)); +-PIN_DECL_2(C23, GPIOF3, SD1DAT1, PWM11); ++SIG_EXPR_LIST_DECL_SESG(C23, VPADE, VPA, SIG_DESC_CLEAR(SCU414, 11), ++ SIG_DESC_CLEAR(SCU4B4, 11)); ++PIN_DECL_3(C23, GPIOF3, SD1DAT1, PWM11, VPADE); + GROUP_DECL(PWM11G0, C23); + ++FUNC_GROUP_DECL(VPA, H24, J22, H22, H23, G22, F22, G23, G24, F23, F26, F25, ++ E26, F24, E23, E24, E25, D26, D24, C25, C26, C24, B26, B25, ++ B24, D22, E22, D23, C23); ++ + #define C22 44 + SIG_EXPR_LIST_DECL_SESG(C22, SD1DAT2, SD1, SIG_DESC_SET(SCU414, 12)); + SIG_EXPR_LIST_DECL_SEMG(C22, PWM12, PWM12G0, PWM12, SIG_DESC_SET(SCU4B4, 12)); +@@ -414,7 +476,7 @@ + #define E21 48 + SIG_EXPR_LIST_DECL_SESG(E21, TXD6, UART6, SIG_DESC_SET(SCU414, 16)); + SIG_EXPR_LIST_DECL_SESG(E21, SD2CLK, SD2, SIG_DESC_SET(SCU4B4, 16), +- SIG_DESC_SET(SCU450, 1)); ++ SIG_DESC_SET(SCU450, 1)); + SIG_EXPR_LIST_DECL_SEMG(E21, SALT9, SALT9G0, SALT9, SIG_DESC_SET(SCU694, 16)); + PIN_DECL_3(E21, GPIOG0, TXD6, SD2CLK, SALT9); + GROUP_DECL(SALT9G0, E21); +@@ -422,7 +484,7 @@ + #define B22 49 + SIG_EXPR_LIST_DECL_SESG(B22, RXD6, UART6, SIG_DESC_SET(SCU414, 17)); + SIG_EXPR_LIST_DECL_SESG(B22, SD2CMD, SD2, SIG_DESC_SET(SCU4B4, 17), +- SIG_DESC_SET(SCU450, 1)); ++ SIG_DESC_SET(SCU450, 1)); + SIG_EXPR_LIST_DECL_SEMG(B22, SALT10, SALT10G0, SALT10, + SIG_DESC_SET(SCU694, 17)); + PIN_DECL_3(B22, GPIOG1, RXD6, SD2CMD, SALT10); +@@ -433,7 +495,7 @@ + #define C21 50 + SIG_EXPR_LIST_DECL_SESG(C21, TXD7, UART7, SIG_DESC_SET(SCU414, 18)); + SIG_EXPR_LIST_DECL_SESG(C21, SD2DAT0, SD2, SIG_DESC_SET(SCU4B4, 18), +- SIG_DESC_SET(SCU450, 1)); ++ SIG_DESC_SET(SCU450, 1)); + SIG_EXPR_LIST_DECL_SEMG(C21, SALT11, SALT11G0, SALT11, + SIG_DESC_SET(SCU694, 18)); + PIN_DECL_3(C21, GPIOG2, TXD7, SD2DAT0, SALT11); +@@ -442,7 +504,7 @@ + #define A22 51 + SIG_EXPR_LIST_DECL_SESG(A22, RXD7, UART7, SIG_DESC_SET(SCU414, 19)); + SIG_EXPR_LIST_DECL_SESG(A22, SD2DAT1, SD2, SIG_DESC_SET(SCU4B4, 19), +- SIG_DESC_SET(SCU450, 1)); ++ SIG_DESC_SET(SCU450, 1)); + SIG_EXPR_LIST_DECL_SEMG(A22, SALT12, SALT12G0, SALT12, + SIG_DESC_SET(SCU694, 19)); + PIN_DECL_3(A22, GPIOG3, RXD7, SD2DAT1, SALT12); +@@ -453,7 +515,7 @@ + #define A21 52 + SIG_EXPR_LIST_DECL_SESG(A21, TXD8, UART8, SIG_DESC_SET(SCU414, 20)); + SIG_EXPR_LIST_DECL_SESG(A21, SD2DAT2, SD2, SIG_DESC_SET(SCU4B4, 20), +- SIG_DESC_SET(SCU450, 1)); ++ SIG_DESC_SET(SCU450, 1)); + SIG_EXPR_LIST_DECL_SEMG(A21, SALT13, SALT13G0, SALT13, + SIG_DESC_SET(SCU694, 20)); + PIN_DECL_3(A21, GPIOG4, TXD8, SD2DAT2, SALT13); +@@ -462,7 +524,7 @@ + #define E20 53 + SIG_EXPR_LIST_DECL_SESG(E20, RXD8, UART8, SIG_DESC_SET(SCU414, 21)); + SIG_EXPR_LIST_DECL_SESG(E20, SD2DAT3, SD2, SIG_DESC_SET(SCU4B4, 21), +- SIG_DESC_SET(SCU450, 1)); ++ SIG_DESC_SET(SCU450, 1)); + SIG_EXPR_LIST_DECL_SEMG(E20, SALT14, SALT14G0, SALT14, + SIG_DESC_SET(SCU694, 21)); + PIN_DECL_3(E20, GPIOG5, RXD8, SD2DAT3, SALT14); +@@ -473,7 +535,7 @@ + #define D21 54 + SIG_EXPR_LIST_DECL_SESG(D21, TXD9, UART9, SIG_DESC_SET(SCU414, 22)); + SIG_EXPR_LIST_DECL_SESG(D21, SD2CD, SD2, SIG_DESC_SET(SCU4B4, 22), +- SIG_DESC_SET(SCU450, 1)); ++ SIG_DESC_SET(SCU450, 1)); + SIG_EXPR_LIST_DECL_SEMG(D21, SALT15, SALT15G0, SALT15, + SIG_DESC_SET(SCU694, 22)); + PIN_DECL_3(D21, GPIOG6, TXD9, SD2CD, SALT15); +@@ -583,116 +645,150 @@ + FUNC_GROUP_DECL(SIOSCI, A15); + + #define B20 72 +-SIG_EXPR_LIST_DECL_SEMG(B20, I3C3SCL, HVI3C3, I3C3, SIG_DESC_SET(SCU418, 8)); 
++SIG_EXPR_LIST_DECL_SEMG(B20, I3C3SCL, HVI3C3, I3C3, SIG_DESC_SET(SCU418, 8), ++ SIG_DESC_CLEAR(SCU438, 20)); + SIG_EXPR_LIST_DECL_SESG(B20, SCL1, I2C1, SIG_DESC_SET(SCU4B8, 8)); +-PIN_DECL_2(B20, GPIOJ0, I3C3SCL, SCL1); ++SIG_EXPR_LIST_DECL_SESG(B20, SSCL1, SI2C1, SIG_DESC_SET(SCU698, 8)); ++PIN_DECL_3(B20, GPIOJ0, I3C3SCL, SCL1, SSCL1); + + #define A20 73 +-SIG_EXPR_LIST_DECL_SEMG(A20, I3C3SDA, HVI3C3, I3C3, SIG_DESC_SET(SCU418, 9)); ++SIG_EXPR_LIST_DECL_SEMG(A20, I3C3SDA, HVI3C3, I3C3, SIG_DESC_SET(SCU418, 9), ++ SIG_DESC_CLEAR(SCU438, 21)); + SIG_EXPR_LIST_DECL_SESG(A20, SDA1, I2C1, SIG_DESC_SET(SCU4B8, 9)); +-PIN_DECL_2(A20, GPIOJ1, I3C3SDA, SDA1); ++SIG_EXPR_LIST_DECL_SESG(A20, SSDA1, SI2C1, SIG_DESC_SET(SCU698, 9)); ++PIN_DECL_3(A20, GPIOJ1, I3C3SDA, SDA1, SSDA1); + + GROUP_DECL(HVI3C3, B20, A20); + FUNC_GROUP_DECL(I2C1, B20, A20); ++FUNC_GROUP_DECL(SI2C1, B20, A20); + + #define E19 74 +-SIG_EXPR_LIST_DECL_SEMG(E19, I3C4SCL, HVI3C4, I3C4, SIG_DESC_SET(SCU418, 10)); ++SIG_EXPR_LIST_DECL_SEMG(E19, I3C4SCL, HVI3C4, I3C4, SIG_DESC_SET(SCU418, 10), ++ SIG_DESC_CLEAR(SCU438, 22)); + SIG_EXPR_LIST_DECL_SESG(E19, SCL2, I2C2, SIG_DESC_SET(SCU4B8, 10)); +-PIN_DECL_2(E19, GPIOJ2, I3C4SCL, SCL2); ++SIG_EXPR_LIST_DECL_SESG(E19, SSCL2, SI2C2, SIG_DESC_SET(SCU698, 10)); ++PIN_DECL_3(E19, GPIOJ2, I3C4SCL, SCL2, SSCL2); + + #define D20 75 +-SIG_EXPR_LIST_DECL_SEMG(D20, I3C4SDA, HVI3C4, I3C4, SIG_DESC_SET(SCU418, 11)); ++SIG_EXPR_LIST_DECL_SEMG(D20, I3C4SDA, HVI3C4, I3C4, SIG_DESC_SET(SCU418, 11), ++ SIG_DESC_CLEAR(SCU438, 23)); + SIG_EXPR_LIST_DECL_SESG(D20, SDA2, I2C2, SIG_DESC_SET(SCU4B8, 11)); +-PIN_DECL_2(D20, GPIOJ3, I3C4SDA, SDA2); ++SIG_EXPR_LIST_DECL_SESG(D20, SSDA2, SI2C2, SIG_DESC_SET(SCU698, 11)); ++PIN_DECL_3(D20, GPIOJ3, I3C4SDA, SDA2, SSDA2); + + GROUP_DECL(HVI3C4, E19, D20); + FUNC_GROUP_DECL(I2C2, E19, D20); ++FUNC_GROUP_DECL(SI2C2, E19, D20); + + #define C19 76 + SIG_EXPR_LIST_DECL_SESG(C19, I3C5SCL, I3C5, SIG_DESC_SET(SCU418, 12)); + SIG_EXPR_LIST_DECL_SESG(C19, SCL3, I2C3, SIG_DESC_SET(SCU4B8, 12)); +-PIN_DECL_2(C19, GPIOJ4, I3C5SCL, SCL3); ++SIG_EXPR_LIST_DECL_SESG(C19, SSCL3, SI2C3, SIG_DESC_SET(SCU698, 12)); ++PIN_DECL_3(C19, GPIOJ4, I3C5SCL, SCL3, SSCL3); + + #define A19 77 + SIG_EXPR_LIST_DECL_SESG(A19, I3C5SDA, I3C5, SIG_DESC_SET(SCU418, 13)); + SIG_EXPR_LIST_DECL_SESG(A19, SDA3, I2C3, SIG_DESC_SET(SCU4B8, 13)); +-PIN_DECL_2(A19, GPIOJ5, I3C5SDA, SDA3); ++SIG_EXPR_LIST_DECL_SESG(A19, SSDA3, SI2C3, SIG_DESC_SET(SCU698, 13)); ++PIN_DECL_3(A19, GPIOJ5, I3C5SDA, SDA3, SSDA3); + + FUNC_GROUP_DECL(I3C5, C19, A19); + FUNC_GROUP_DECL(I2C3, C19, A19); ++FUNC_GROUP_DECL(SI2C3, C19, A19); + + #define C20 78 + SIG_EXPR_LIST_DECL_SESG(C20, I3C6SCL, I3C6, SIG_DESC_SET(SCU418, 14)); + SIG_EXPR_LIST_DECL_SESG(C20, SCL4, I2C4, SIG_DESC_SET(SCU4B8, 14)); +-PIN_DECL_2(C20, GPIOJ6, I3C6SCL, SCL4); ++SIG_EXPR_LIST_DECL_SESG(C20, SSCL4, SI2C4, SIG_DESC_SET(SCU698, 14)); ++PIN_DECL_3(C20, GPIOJ6, I3C6SCL, SCL4, SSCL4); + + #define D19 79 + SIG_EXPR_LIST_DECL_SESG(D19, I3C6SDA, I3C6, SIG_DESC_SET(SCU418, 15)); + SIG_EXPR_LIST_DECL_SESG(D19, SDA4, I2C4, SIG_DESC_SET(SCU4B8, 15)); +-PIN_DECL_2(D19, GPIOJ7, I3C6SDA, SDA4); ++SIG_EXPR_LIST_DECL_SESG(D19, SSDA4, SI2C4, SIG_DESC_SET(SCU698, 15)); ++PIN_DECL_3(D19, GPIOJ7, I3C6SDA, SDA4, SSDA4); + + FUNC_GROUP_DECL(I3C6, C20, D19); + FUNC_GROUP_DECL(I2C4, C20, D19); ++FUNC_GROUP_DECL(SI2C4, C20, D19); + + #define A11 80 + SIG_EXPR_LIST_DECL_SESG(A11, SCL5, I2C5, SIG_DESC_SET(SCU418, 16)); +-PIN_DECL_1(A11, GPIOK0, SCL5); ++SIG_EXPR_LIST_DECL_SESG(A11, 
SSCL5, SI2C5, SIG_DESC_SET(SCU4B8, 16)); ++PIN_DECL_2(A11, GPIOK0, SCL5, SSCL5); + + #define C11 81 + SIG_EXPR_LIST_DECL_SESG(C11, SDA5, I2C5, SIG_DESC_SET(SCU418, 17)); +-PIN_DECL_1(C11, GPIOK1, SDA5); ++SIG_EXPR_LIST_DECL_SESG(C11, SSDA5, SI2C5, SIG_DESC_SET(SCU4B8, 17)); ++PIN_DECL_2(C11, GPIOK1, SDA5, SSDA5); + + FUNC_GROUP_DECL(I2C5, A11, C11); ++FUNC_GROUP_DECL(SI2C5, A11, C11); + + #define D12 82 + SIG_EXPR_LIST_DECL_SESG(D12, SCL6, I2C6, SIG_DESC_SET(SCU418, 18)); +-PIN_DECL_1(D12, GPIOK2, SCL6); ++SIG_EXPR_LIST_DECL_SESG(D12, SSCL6, SI2C6, SIG_DESC_SET(SCU4B8, 18)); ++PIN_DECL_2(D12, GPIOK2, SCL6, SSCL6); + + #define E13 83 + SIG_EXPR_LIST_DECL_SESG(E13, SDA6, I2C6, SIG_DESC_SET(SCU418, 19)); +-PIN_DECL_1(E13, GPIOK3, SDA6); ++SIG_EXPR_LIST_DECL_SESG(E13, SSDA6, SI2C6, SIG_DESC_SET(SCU4B8, 19)); ++PIN_DECL_2(E13, GPIOK3, SDA6, SSDA6); + + FUNC_GROUP_DECL(I2C6, D12, E13); ++FUNC_GROUP_DECL(SI2C6, D12, E13); + + #define D11 84 + SIG_EXPR_LIST_DECL_SESG(D11, SCL7, I2C7, SIG_DESC_SET(SCU418, 20)); +-PIN_DECL_1(D11, GPIOK4, SCL7); ++SIG_EXPR_LIST_DECL_SESG(D11, SSCL7, SI2C7, SIG_DESC_SET(SCU4B8, 20)); ++PIN_DECL_2(D11, GPIOK4, SCL7, SSCL7); + + #define E11 85 + SIG_EXPR_LIST_DECL_SESG(E11, SDA7, I2C7, SIG_DESC_SET(SCU418, 21)); +-PIN_DECL_1(E11, GPIOK5, SDA7); ++SIG_EXPR_LIST_DECL_SESG(E11, SSDA7, SI2C7, SIG_DESC_SET(SCU4B8, 21)); ++PIN_DECL_2(E11, GPIOK5, SDA7, SSDA7); + + FUNC_GROUP_DECL(I2C7, D11, E11); ++FUNC_GROUP_DECL(SI2C7, D11, E11); + + #define F13 86 + SIG_EXPR_LIST_DECL_SESG(F13, SCL8, I2C8, SIG_DESC_SET(SCU418, 22)); +-PIN_DECL_1(F13, GPIOK6, SCL8); ++SIG_EXPR_LIST_DECL_SESG(F13, SSCL8, SI2C8, SIG_DESC_SET(SCU4B8, 22)); ++PIN_DECL_2(F13, GPIOK6, SCL8, SSCL8); + + #define E12 87 + SIG_EXPR_LIST_DECL_SESG(E12, SDA8, I2C8, SIG_DESC_SET(SCU418, 23)); +-PIN_DECL_1(E12, GPIOK7, SDA8); ++SIG_EXPR_LIST_DECL_SESG(E12, SSDA8, SI2C8, SIG_DESC_SET(SCU4B8, 23)); ++PIN_DECL_2(E12, GPIOK7, SDA8, SSDA8); + + FUNC_GROUP_DECL(I2C8, F13, E12); ++FUNC_GROUP_DECL(SI2C8, F13, E12); + + #define D15 88 + SIG_EXPR_LIST_DECL_SESG(D15, SCL9, I2C9, SIG_DESC_SET(SCU418, 24)); +-PIN_DECL_1(D15, GPIOL0, SCL9); ++SIG_EXPR_LIST_DECL_SESG(D15, SSCL9, SI2C9, SIG_DESC_SET(SCU4B8, 24)); ++PIN_DECL_2(D15, GPIOL0, SCL9, SSCL9); + + #define A14 89 + SIG_EXPR_LIST_DECL_SESG(A14, SDA9, I2C9, SIG_DESC_SET(SCU418, 25)); +-PIN_DECL_1(A14, GPIOL1, SDA9); ++SIG_EXPR_LIST_DECL_SESG(A14, SSDA9, SI2C9, SIG_DESC_SET(SCU4B8, 25)); ++PIN_DECL_2(A14, GPIOL1, SDA9, SSDA9); + + FUNC_GROUP_DECL(I2C9, D15, A14); ++FUNC_GROUP_DECL(SI2C9, D15, A14); + + #define E15 90 + SIG_EXPR_LIST_DECL_SESG(E15, SCL10, I2C10, SIG_DESC_SET(SCU418, 26)); +-PIN_DECL_1(E15, GPIOL2, SCL10); ++SIG_EXPR_LIST_DECL_SESG(E15, SSCL10, SI2C10, SIG_DESC_SET(SCU4B8, 26)); ++PIN_DECL_2(E15, GPIOL2, SCL10, SSCL10); + + #define A13 91 + SIG_EXPR_LIST_DECL_SESG(A13, SDA10, I2C10, SIG_DESC_SET(SCU418, 27)); +-PIN_DECL_1(A13, GPIOL3, SDA10); ++SIG_EXPR_LIST_DECL_SESG(A13, SSDA10, SI2C10, SIG_DESC_SET(SCU4B8, 27)); ++PIN_DECL_2(A13, GPIOL3, SDA10, SSDA10); + + FUNC_GROUP_DECL(I2C10, E15, A13); ++FUNC_GROUP_DECL(SI2C10, E15, A13); + + #define C15 92 + SSSF_PIN_DECL(C15, GPIOL4, TXD3, SIG_DESC_SET(SCU418, 28)); +@@ -987,9 +1083,8 @@ + + #define AB16 160 + SIG_EXPR_LIST_DECL_SEMG(AB16, SALT9, SALT9G1, SALT9, SIG_DESC_SET(SCU434, 0), +- SIG_DESC_CLEAR(SCU694, 16)); +-SIG_EXPR_LIST_DECL_SESG(AB16, GPIU0, GPIU0, SIG_DESC_SET(SCU434, 0), +- SIG_DESC_SET(SCU694, 16)); ++ SIG_DESC_CLEAR(SCU694, 16), SIG_DESC_SET(SCU4D4, 0)); ++SIG_EXPR_LIST_DECL_SESG(AB16, GPIU0, GPIU0, 
SIG_DESC_SET(SCU434, 0)); + SIG_EXPR_LIST_DECL_SESG(AB16, ADC8, ADC8); + PIN_DECL_(AB16, SIG_EXPR_LIST_PTR(AB16, SALT9), SIG_EXPR_LIST_PTR(AB16, GPIU0), + SIG_EXPR_LIST_PTR(AB16, ADC8)); +@@ -1000,9 +1095,8 @@ + + #define AA17 161 + SIG_EXPR_LIST_DECL_SEMG(AA17, SALT10, SALT10G1, SALT10, SIG_DESC_SET(SCU434, 1), +- SIG_DESC_CLEAR(SCU694, 17)); +-SIG_EXPR_LIST_DECL_SESG(AA17, GPIU1, GPIU1, SIG_DESC_SET(SCU434, 1), +- SIG_DESC_SET(SCU694, 17)); ++ SIG_DESC_CLEAR(SCU694, 17), SIG_DESC_SET(SCU4D4, 1)); ++SIG_EXPR_LIST_DECL_SESG(AA17, GPIU1, GPIU1, SIG_DESC_SET(SCU434, 1)); + SIG_EXPR_LIST_DECL_SESG(AA17, ADC9, ADC9); + PIN_DECL_(AA17, SIG_EXPR_LIST_PTR(AA17, SALT10), SIG_EXPR_LIST_PTR(AA17, GPIU1), + SIG_EXPR_LIST_PTR(AA17, ADC9)); +@@ -1013,9 +1107,8 @@ + + #define AB17 162 + SIG_EXPR_LIST_DECL_SEMG(AB17, SALT11, SALT11G1, SALT11, SIG_DESC_SET(SCU434, 2), +- SIG_DESC_CLEAR(SCU694, 18)); +-SIG_EXPR_LIST_DECL_SESG(AB17, GPIU2, GPIU2, SIG_DESC_SET(SCU434, 2), +- SIG_DESC_SET(SCU694, 18)); ++ SIG_DESC_CLEAR(SCU694, 18), SIG_DESC_SET(SCU4D4, 2)); ++SIG_EXPR_LIST_DECL_SESG(AB17, GPIU2, GPIU2, SIG_DESC_SET(SCU434, 2)); + SIG_EXPR_LIST_DECL_SESG(AB17, ADC10, ADC10); + PIN_DECL_(AB17, SIG_EXPR_LIST_PTR(AB17, SALT11), SIG_EXPR_LIST_PTR(AB17, GPIU2), + SIG_EXPR_LIST_PTR(AB17, ADC10)); +@@ -1026,9 +1119,8 @@ + + #define AE16 163 + SIG_EXPR_LIST_DECL_SEMG(AE16, SALT12, SALT12G1, SALT12, SIG_DESC_SET(SCU434, 3), +- SIG_DESC_CLEAR(SCU694, 19)); +-SIG_EXPR_LIST_DECL_SESG(AE16, GPIU3, GPIU3, SIG_DESC_SET(SCU434, 3), +- SIG_DESC_SET(SCU694, 19)); ++ SIG_DESC_CLEAR(SCU694, 19), SIG_DESC_SET(SCU4D4, 3)); ++SIG_EXPR_LIST_DECL_SESG(AE16, GPIU3, GPIU3, SIG_DESC_SET(SCU434, 3)); + SIG_EXPR_LIST_DECL_SESG(AE16, ADC11, ADC11); + PIN_DECL_(AE16, SIG_EXPR_LIST_PTR(AE16, SALT12), SIG_EXPR_LIST_PTR(AE16, GPIU3), + SIG_EXPR_LIST_PTR(AE16, ADC11)); +@@ -1039,9 +1131,8 @@ + + #define AC16 164 + SIG_EXPR_LIST_DECL_SEMG(AC16, SALT13, SALT13G1, SALT13, SIG_DESC_SET(SCU434, 4), +- SIG_DESC_CLEAR(SCU694, 20)); +-SIG_EXPR_LIST_DECL_SESG(AC16, GPIU4, GPIU4, SIG_DESC_SET(SCU434, 4), +- SIG_DESC_SET(SCU694, 20)); ++ SIG_DESC_CLEAR(SCU694, 20), SIG_DESC_SET(SCU4D4, 4)); ++SIG_EXPR_LIST_DECL_SESG(AC16, GPIU4, GPIU4, SIG_DESC_SET(SCU434, 4)); + SIG_EXPR_LIST_DECL_SESG(AC16, ADC12, ADC12); + PIN_DECL_(AC16, SIG_EXPR_LIST_PTR(AC16, SALT13), SIG_EXPR_LIST_PTR(AC16, GPIU4), + SIG_EXPR_LIST_PTR(AC16, ADC12)); +@@ -1052,9 +1143,8 @@ + + #define AA16 165 + SIG_EXPR_LIST_DECL_SEMG(AA16, SALT14, SALT14G1, SALT14, SIG_DESC_SET(SCU434, 5), +- SIG_DESC_CLEAR(SCU694, 21)); +-SIG_EXPR_LIST_DECL_SESG(AA16, GPIU5, GPIU5, SIG_DESC_SET(SCU434, 5), +- SIG_DESC_SET(SCU694, 21)); ++ SIG_DESC_CLEAR(SCU694, 21), SIG_DESC_SET(SCU4D4, 5)); ++SIG_EXPR_LIST_DECL_SESG(AA16, GPIU5, GPIU5, SIG_DESC_SET(SCU434, 5)); + SIG_EXPR_LIST_DECL_SESG(AA16, ADC13, ADC13); + PIN_DECL_(AA16, SIG_EXPR_LIST_PTR(AA16, SALT14), SIG_EXPR_LIST_PTR(AA16, GPIU5), + SIG_EXPR_LIST_PTR(AA16, ADC13)); +@@ -1065,9 +1155,8 @@ + + #define AD16 166 + SIG_EXPR_LIST_DECL_SEMG(AD16, SALT15, SALT15G1, SALT15, SIG_DESC_SET(SCU434, 6), +- SIG_DESC_CLEAR(SCU694, 22)); +-SIG_EXPR_LIST_DECL_SESG(AD16, GPIU6, GPIU6, SIG_DESC_SET(SCU434, 6), +- SIG_DESC_SET(SCU694, 22)); ++ SIG_DESC_CLEAR(SCU694, 22), SIG_DESC_SET(SCU4D4, 6)); ++SIG_EXPR_LIST_DECL_SESG(AD16, GPIU6, GPIU6, SIG_DESC_SET(SCU434, 6)); + SIG_EXPR_LIST_DECL_SESG(AD16, ADC14, ADC14); + PIN_DECL_(AD16, SIG_EXPR_LIST_PTR(AD16, SALT15), SIG_EXPR_LIST_PTR(AD16, GPIU6), + SIG_EXPR_LIST_PTR(AD16, ADC14)); +@@ -1078,9 +1167,8 @@ + + #define AC17 167 + 
SIG_EXPR_LIST_DECL_SEMG(AC17, SALT16, SALT16G1, SALT16, SIG_DESC_SET(SCU434, 7), +- SIG_DESC_CLEAR(SCU694, 23)); +-SIG_EXPR_LIST_DECL_SESG(AC17, GPIU7, GPIU7, SIG_DESC_SET(SCU434, 7), +- SIG_DESC_SET(SCU694, 23)); ++ SIG_DESC_CLEAR(SCU694, 23), SIG_DESC_SET(SCU4D4, 7)); ++SIG_EXPR_LIST_DECL_SESG(AC17, GPIU7, GPIU7, SIG_DESC_SET(SCU434, 7)); + SIG_EXPR_LIST_DECL_SESG(AC17, ADC15, ADC15); + PIN_DECL_(AC17, SIG_EXPR_LIST_PTR(AC17, SALT16), SIG_EXPR_LIST_PTR(AC17, GPIU7), + SIG_EXPR_LIST_PTR(AC17, ADC15)); +@@ -1205,7 +1293,7 @@ + SIG_DESC_SET(SCU4D4, 31)); + PIN_DECL_2(AB10, GPIOX7, SPI2DQ3, RXD12); + +-GROUP_DECL(QSPI2, AE8, AF8, AB9, AD9, AF9, AB10); ++GROUP_DECL(QSPI2, AF9, AB10); + FUNC_DECL_2(SPI2, SPI2, QSPI2); + + GROUP_DECL(UART12G1, AF9, AB10); +@@ -1240,15 +1328,21 @@ + FUNC_GROUP_DECL(WDTRST4, AA12); + + #define AE12 196 ++SIG_EXPR_LIST_DECL_SEMG(AE12, FWSPIDQ2, FWQSPID, FWSPID, ++ SIG_DESC_SET(SCU438, 4)); + SIG_EXPR_LIST_DECL_SESG(AE12, FWSPIQ2, FWQSPI, SIG_DESC_SET(SCU438, 4)); + SIG_EXPR_LIST_DECL_SESG(AE12, GPIOY4, GPIOY4); +-PIN_DECL_(AE12, SIG_EXPR_LIST_PTR(AE12, FWSPIQ2), ++PIN_DECL_(AE12, SIG_EXPR_LIST_PTR(AE12, FWSPIDQ2), ++ SIG_EXPR_LIST_PTR(AE12, FWSPIQ2), + SIG_EXPR_LIST_PTR(AE12, GPIOY4)); + + #define AF12 197 ++SIG_EXPR_LIST_DECL_SEMG(AF12, FWSPIDQ3, FWQSPID, FWSPID, ++ SIG_DESC_SET(SCU438, 5)); + SIG_EXPR_LIST_DECL_SESG(AF12, FWSPIQ3, FWQSPI, SIG_DESC_SET(SCU438, 5)); + SIG_EXPR_LIST_DECL_SESG(AF12, GPIOY5, GPIOY5); +-PIN_DECL_(AF12, SIG_EXPR_LIST_PTR(AF12, FWSPIQ3), ++PIN_DECL_(AF12, SIG_EXPR_LIST_PTR(AF12, FWSPIDQ3), ++ SIG_EXPR_LIST_PTR(AF12, FWSPIQ3), + SIG_EXPR_LIST_PTR(AF12, GPIOY5)); + FUNC_GROUP_DECL(FWQSPI, AE12, AF12); + +@@ -1293,7 +1387,7 @@ + SIG_DESC_CLEAR(SCU4B8, 3), SIG_DESC_SET(SCU4D8, 15)); + PIN_DECL_2(AF10, GPIOZ7, SPI1DQ3, RXD13); + +-GROUP_DECL(QSPI1, AB11, AC11, AA11, AD11, AF10); ++GROUP_DECL(QSPI1, AD11, AF10); + FUNC_DECL_2(SPI1, SPI1, QSPI1); + + GROUP_DECL(UART13G1, AD11, AF10); +@@ -1301,80 +1395,80 @@ + + #define C6 208 + SIG_EXPR_LIST_DECL_SESG(C6, RGMII1TXCK, RGMII1, SIG_DESC_SET(SCU400, 0), +- SIG_DESC_SET(SCU500, 6)); ++ SIG_DESC_SET(SCU500, 6)); + SIG_EXPR_LIST_DECL_SESG(C6, RMII1RCLKO, RMII1, SIG_DESC_SET(SCU400, 0), +- SIG_DESC_CLEAR(SCU500, 6)); ++ SIG_DESC_CLEAR(SCU500, 6)); + PIN_DECL_2(C6, GPIO18A0, RGMII1TXCK, RMII1RCLKO); + + #define D6 209 + SIG_EXPR_LIST_DECL_SESG(D6, RGMII1TXCTL, RGMII1, SIG_DESC_SET(SCU400, 1), +- SIG_DESC_SET(SCU500, 6)); ++ SIG_DESC_SET(SCU500, 6)); + SIG_EXPR_LIST_DECL_SESG(D6, RMII1TXEN, RMII1, SIG_DESC_SET(SCU400, 1), +- SIG_DESC_CLEAR(SCU500, 6)); ++ SIG_DESC_CLEAR(SCU500, 6)); + PIN_DECL_2(D6, GPIO18A1, RGMII1TXCTL, RMII1TXEN); + + #define D5 210 + SIG_EXPR_LIST_DECL_SESG(D5, RGMII1TXD0, RGMII1, SIG_DESC_SET(SCU400, 2), +- SIG_DESC_SET(SCU500, 6)); ++ SIG_DESC_SET(SCU500, 6)); + SIG_EXPR_LIST_DECL_SESG(D5, RMII1TXD0, RMII1, SIG_DESC_SET(SCU400, 2), +- SIG_DESC_CLEAR(SCU500, 6)); ++ SIG_DESC_CLEAR(SCU500, 6)); + PIN_DECL_2(D5, GPIO18A2, RGMII1TXD0, RMII1TXD0); + + #define A3 211 + SIG_EXPR_LIST_DECL_SESG(A3, RGMII1TXD1, RGMII1, SIG_DESC_SET(SCU400, 3), +- SIG_DESC_SET(SCU500, 6)); ++ SIG_DESC_SET(SCU500, 6)); + SIG_EXPR_LIST_DECL_SESG(A3, RMII1TXD1, RMII1, SIG_DESC_SET(SCU400, 3), +- SIG_DESC_CLEAR(SCU500, 6)); ++ SIG_DESC_CLEAR(SCU500, 6)); + PIN_DECL_2(A3, GPIO18A3, RGMII1TXD1, RMII1TXD1); + + #define C5 212 + SIG_EXPR_LIST_DECL_SESG(C5, RGMII1TXD2, RGMII1, SIG_DESC_SET(SCU400, 4), +- SIG_DESC_SET(SCU500, 6)); ++ SIG_DESC_SET(SCU500, 6)); + PIN_DECL_1(C5, GPIO18A4, RGMII1TXD2); + + #define E6 
213 + SIG_EXPR_LIST_DECL_SESG(E6, RGMII1TXD3, RGMII1, SIG_DESC_SET(SCU400, 5), +- SIG_DESC_SET(SCU500, 6)); ++ SIG_DESC_SET(SCU500, 6)); + PIN_DECL_1(E6, GPIO18A5, RGMII1TXD3); + + #define B3 214 + SIG_EXPR_LIST_DECL_SESG(B3, RGMII1RXCK, RGMII1, SIG_DESC_SET(SCU400, 6), +- SIG_DESC_SET(SCU500, 6)); ++ SIG_DESC_SET(SCU500, 6)); + SIG_EXPR_LIST_DECL_SESG(B3, RMII1RCLKI, RMII1, SIG_DESC_SET(SCU400, 6), +- SIG_DESC_CLEAR(SCU500, 6)); ++ SIG_DESC_CLEAR(SCU500, 6)); + PIN_DECL_2(B3, GPIO18A6, RGMII1RXCK, RMII1RCLKI); + + #define A2 215 + SIG_EXPR_LIST_DECL_SESG(A2, RGMII1RXCTL, RGMII1, SIG_DESC_SET(SCU400, 7), +- SIG_DESC_SET(SCU500, 6)); ++ SIG_DESC_SET(SCU500, 6)); + PIN_DECL_1(A2, GPIO18A7, RGMII1RXCTL); + + #define B2 216 + SIG_EXPR_LIST_DECL_SESG(B2, RGMII1RXD0, RGMII1, SIG_DESC_SET(SCU400, 8), +- SIG_DESC_SET(SCU500, 6)); ++ SIG_DESC_SET(SCU500, 6)); + SIG_EXPR_LIST_DECL_SESG(B2, RMII1RXD0, RMII1, SIG_DESC_SET(SCU400, 8), +- SIG_DESC_CLEAR(SCU500, 6)); ++ SIG_DESC_CLEAR(SCU500, 6)); + PIN_DECL_2(B2, GPIO18B0, RGMII1RXD0, RMII1RXD0); + + #define B1 217 + SIG_EXPR_LIST_DECL_SESG(B1, RGMII1RXD1, RGMII1, SIG_DESC_SET(SCU400, 9), +- SIG_DESC_SET(SCU500, 6)); ++ SIG_DESC_SET(SCU500, 6)); + SIG_EXPR_LIST_DECL_SESG(B1, RMII1RXD1, RMII1, SIG_DESC_SET(SCU400, 9), +- SIG_DESC_CLEAR(SCU500, 6)); ++ SIG_DESC_CLEAR(SCU500, 6)); + PIN_DECL_2(B1, GPIO18B1, RGMII1RXD1, RMII1RXD1); + + #define C4 218 + SIG_EXPR_LIST_DECL_SESG(C4, RGMII1RXD2, RGMII1, SIG_DESC_SET(SCU400, 10), +- SIG_DESC_SET(SCU500, 6)); ++ SIG_DESC_SET(SCU500, 6)); + SIG_EXPR_LIST_DECL_SESG(C4, RMII1CRSDV, RMII1, SIG_DESC_SET(SCU400, 10), +- SIG_DESC_CLEAR(SCU500, 6)); ++ SIG_DESC_CLEAR(SCU500, 6)); + PIN_DECL_2(C4, GPIO18B2, RGMII1RXD2, RMII1CRSDV); + + #define E5 219 + SIG_EXPR_LIST_DECL_SESG(E5, RGMII1RXD3, RGMII1, SIG_DESC_SET(SCU400, 11), +- SIG_DESC_SET(SCU500, 6)); ++ SIG_DESC_SET(SCU500, 6)); + SIG_EXPR_LIST_DECL_SESG(E5, RMII1RXER, RMII1, SIG_DESC_SET(SCU400, 11), +- SIG_DESC_CLEAR(SCU500, 6)); ++ SIG_DESC_CLEAR(SCU500, 6)); + PIN_DECL_2(E5, GPIO18B3, RGMII1RXD3, RMII1RXER); + + FUNC_GROUP_DECL(RGMII1, C6, D6, D5, A3, C5, E6, B3, A2, B2, B1, C4, E5); +@@ -1382,80 +1476,80 @@ + + #define D4 220 + SIG_EXPR_LIST_DECL_SESG(D4, RGMII2TXCK, RGMII2, SIG_DESC_SET(SCU400, 12), +- SIG_DESC_SET(SCU500, 7)); ++ SIG_DESC_SET(SCU500, 7)); + SIG_EXPR_LIST_DECL_SESG(D4, RMII2RCLKO, RMII2, SIG_DESC_SET(SCU400, 12), +- SIG_DESC_CLEAR(SCU500, 7)); ++ SIG_DESC_CLEAR(SCU500, 7)); + PIN_DECL_2(D4, GPIO18B4, RGMII2TXCK, RMII2RCLKO); + + #define C2 221 + SIG_EXPR_LIST_DECL_SESG(C2, RGMII2TXCTL, RGMII2, SIG_DESC_SET(SCU400, 13), +- SIG_DESC_SET(SCU500, 7)); ++ SIG_DESC_SET(SCU500, 7)); + SIG_EXPR_LIST_DECL_SESG(C2, RMII2TXEN, RMII2, SIG_DESC_SET(SCU400, 13), +- SIG_DESC_CLEAR(SCU500, 7)); ++ SIG_DESC_CLEAR(SCU500, 7)); + PIN_DECL_2(C2, GPIO18B5, RGMII2TXCTL, RMII2TXEN); + + #define C1 222 + SIG_EXPR_LIST_DECL_SESG(C1, RGMII2TXD0, RGMII2, SIG_DESC_SET(SCU400, 14), +- SIG_DESC_SET(SCU500, 7)); ++ SIG_DESC_SET(SCU500, 7)); + SIG_EXPR_LIST_DECL_SESG(C1, RMII2TXD0, RMII2, SIG_DESC_SET(SCU400, 14), +- SIG_DESC_CLEAR(SCU500, 7)); ++ SIG_DESC_CLEAR(SCU500, 7)); + PIN_DECL_2(C1, GPIO18B6, RGMII2TXD0, RMII2TXD0); + + #define D3 223 + SIG_EXPR_LIST_DECL_SESG(D3, RGMII2TXD1, RGMII2, SIG_DESC_SET(SCU400, 15), +- SIG_DESC_SET(SCU500, 7)); ++ SIG_DESC_SET(SCU500, 7)); + SIG_EXPR_LIST_DECL_SESG(D3, RMII2TXD1, RMII2, SIG_DESC_SET(SCU400, 15), +- SIG_DESC_CLEAR(SCU500, 7)); ++ SIG_DESC_CLEAR(SCU500, 7)); + PIN_DECL_2(D3, GPIO18B7, RGMII2TXD1, RMII2TXD1); + + #define E4 224 
+ SIG_EXPR_LIST_DECL_SESG(E4, RGMII2TXD2, RGMII2, SIG_DESC_SET(SCU400, 16), +- SIG_DESC_SET(SCU500, 7)); ++ SIG_DESC_SET(SCU500, 7)); + PIN_DECL_1(E4, GPIO18C0, RGMII2TXD2); + + #define F5 225 + SIG_EXPR_LIST_DECL_SESG(F5, RGMII2TXD3, RGMII2, SIG_DESC_SET(SCU400, 17), +- SIG_DESC_SET(SCU500, 7)); ++ SIG_DESC_SET(SCU500, 7)); + PIN_DECL_1(F5, GPIO18C1, RGMII2TXD3); + + #define D2 226 + SIG_EXPR_LIST_DECL_SESG(D2, RGMII2RXCK, RGMII2, SIG_DESC_SET(SCU400, 18), +- SIG_DESC_SET(SCU500, 7)); ++ SIG_DESC_SET(SCU500, 7)); + SIG_EXPR_LIST_DECL_SESG(D2, RMII2RCLKI, RMII2, SIG_DESC_SET(SCU400, 18), +- SIG_DESC_CLEAR(SCU500, 7)); ++ SIG_DESC_CLEAR(SCU500, 7)); + PIN_DECL_2(D2, GPIO18C2, RGMII2RXCK, RMII2RCLKI); + + #define E3 227 + SIG_EXPR_LIST_DECL_SESG(E3, RGMII2RXCTL, RGMII2, SIG_DESC_SET(SCU400, 19), +- SIG_DESC_SET(SCU500, 7)); ++ SIG_DESC_SET(SCU500, 7)); + PIN_DECL_1(E3, GPIO18C3, RGMII2RXCTL); + + #define D1 228 + SIG_EXPR_LIST_DECL_SESG(D1, RGMII2RXD0, RGMII2, SIG_DESC_SET(SCU400, 20), +- SIG_DESC_SET(SCU500, 7)); ++ SIG_DESC_SET(SCU500, 7)); + SIG_EXPR_LIST_DECL_SESG(D1, RMII2RXD0, RMII2, SIG_DESC_SET(SCU400, 20), +- SIG_DESC_CLEAR(SCU500, 7)); ++ SIG_DESC_CLEAR(SCU500, 7)); + PIN_DECL_2(D1, GPIO18C4, RGMII2RXD0, RMII2RXD0); + + #define F4 229 + SIG_EXPR_LIST_DECL_SESG(F4, RGMII2RXD1, RGMII2, SIG_DESC_SET(SCU400, 21), +- SIG_DESC_SET(SCU500, 7)); ++ SIG_DESC_SET(SCU500, 7)); + SIG_EXPR_LIST_DECL_SESG(F4, RMII2RXD1, RMII2, SIG_DESC_SET(SCU400, 21), +- SIG_DESC_CLEAR(SCU500, 7)); ++ SIG_DESC_CLEAR(SCU500, 7)); + PIN_DECL_2(F4, GPIO18C5, RGMII2RXD1, RMII2RXD1); + + #define E2 230 + SIG_EXPR_LIST_DECL_SESG(E2, RGMII2RXD2, RGMII2, SIG_DESC_SET(SCU400, 22), +- SIG_DESC_SET(SCU500, 7)); ++ SIG_DESC_SET(SCU500, 7)); + SIG_EXPR_LIST_DECL_SESG(E2, RMII2CRSDV, RMII2, SIG_DESC_SET(SCU400, 22), +- SIG_DESC_CLEAR(SCU500, 7)); ++ SIG_DESC_CLEAR(SCU500, 7)); + PIN_DECL_2(E2, GPIO18C6, RGMII2RXD2, RMII2CRSDV); + + #define E1 231 + SIG_EXPR_LIST_DECL_SESG(E1, RGMII2RXD3, RGMII2, SIG_DESC_SET(SCU400, 23), +- SIG_DESC_SET(SCU500, 7)); ++ SIG_DESC_SET(SCU500, 7)); + SIG_EXPR_LIST_DECL_SESG(E1, RMII2RXER, RMII2, SIG_DESC_SET(SCU400, 23), +- SIG_DESC_CLEAR(SCU500, 7)); ++ SIG_DESC_CLEAR(SCU500, 7)); + PIN_DECL_2(E1, GPIO18C7, RGMII2RXD3, RMII2RXER); + + FUNC_GROUP_DECL(RGMII2, D4, C2, C1, D3, E4, F5, D2, E3, D1, F4, E2, E1); +@@ -1523,8 +1617,9 @@ + PIN_DECL_3(Y4, GPIO18E3, FWSPIDMISO, VBMISO, EMMCDAT7); + + GROUP_DECL(FWSPID, Y1, Y2, Y3, Y4); ++GROUP_DECL(FWQSPID, Y1, Y2, Y3, Y4, AE12, AF12); + GROUP_DECL(EMMCG8, AB4, AA4, AC4, AA5, Y5, AB5, AB6, AC5, Y1, Y2, Y3, Y4); +-FUNC_DECL_1(FWSPID, FWSPID); ++FUNC_DECL_2(FWSPID, FWSPID, FWQSPID); + FUNC_GROUP_DECL(VB, Y1, Y2, Y3, Y4); + FUNC_DECL_3(EMMC, EMMCG1, EMMCG4, EMMCG8); + /* +@@ -1532,13 +1627,15 @@ + * following 4 pins + */ + #define AF25 244 +-SIG_EXPR_LIST_DECL_SEMG(AF25, I3C3SCL, I3C3, I3C3, SIG_DESC_SET(SCU438, 20)); ++SIG_EXPR_LIST_DECL_SEMG(AF25, I3C3SCL, I3C3, I3C3, SIG_DESC_SET(SCU438, 20), ++ SIG_DESC_CLEAR(SCU418, 8)); + SIG_EXPR_LIST_DECL_SESG(AF25, FSI1CLK, FSI1, SIG_DESC_SET(SCU4D8, 20)); + PIN_DECL_(AF25, SIG_EXPR_LIST_PTR(AF25, I3C3SCL), + SIG_EXPR_LIST_PTR(AF25, FSI1CLK)); + + #define AE26 245 +-SIG_EXPR_LIST_DECL_SEMG(AE26, I3C3SDA, I3C3, I3C3, SIG_DESC_SET(SCU438, 21)); ++SIG_EXPR_LIST_DECL_SEMG(AE26, I3C3SDA, I3C3, I3C3, SIG_DESC_SET(SCU438, 21), ++ SIG_DESC_CLEAR(SCU418, 9)); + SIG_EXPR_LIST_DECL_SESG(AE26, FSI1DATA, FSI1, SIG_DESC_SET(SCU4D8, 21)); + PIN_DECL_(AE26, SIG_EXPR_LIST_PTR(AE26, I3C3SDA), + SIG_EXPR_LIST_PTR(AE26, FSI1DATA)); +@@ 
-1548,13 +1645,15 @@ + FUNC_GROUP_DECL(FSI1, AF25, AE26); + + #define AE25 246 +-SIG_EXPR_LIST_DECL_SEMG(AE25, I3C4SCL, I3C4, I3C4, SIG_DESC_SET(SCU438, 22)); ++SIG_EXPR_LIST_DECL_SEMG(AE25, I3C4SCL, I3C4, I3C4, SIG_DESC_SET(SCU438, 22), ++ SIG_DESC_CLEAR(SCU418, 10)); + SIG_EXPR_LIST_DECL_SESG(AE25, FSI2CLK, FSI2, SIG_DESC_SET(SCU4D8, 22)); + PIN_DECL_(AE25, SIG_EXPR_LIST_PTR(AE25, I3C4SCL), + SIG_EXPR_LIST_PTR(AE25, FSI2CLK)); + + #define AF24 247 +-SIG_EXPR_LIST_DECL_SEMG(AF24, I3C4SDA, I3C4, I3C4, SIG_DESC_SET(SCU438, 23)); ++SIG_EXPR_LIST_DECL_SEMG(AF24, I3C4SDA, I3C4, I3C4, SIG_DESC_SET(SCU438, 23), ++ SIG_DESC_CLEAR(SCU418, 11)); + SIG_EXPR_LIST_DECL_SESG(AF24, FSI2DATA, FSI2, SIG_DESC_SET(SCU4D8, 23)); + PIN_DECL_(AF24, SIG_EXPR_LIST_PTR(AF24, I3C4SDA), + SIG_EXPR_LIST_PTR(AF24, FSI2DATA)); +@@ -1636,6 +1735,23 @@ + FUNC_DECL_1(USB2BD, USBB); + FUNC_DECL_1(USB2BH, USBB); + ++/* bit19: Enable RC-L DMA mode ++ * bit23: Enable RC-L DMA decode ++ */ ++#define PCIERC0_DESC { ASPEED_IP_SCU, SCUC24, GENMASK(23, 19), 0x1f, 0 } ++ ++#define A7 256 ++SIG_EXPR_LIST_DECL_SESG(A7, PERST, PCIERC0, SIG_DESC_SET(SCU040, 21), ++ SIG_DESC_CLEAR(SCU0C8, 6), PCIERC0_DESC); ++PIN_DECL_(A7, SIG_EXPR_LIST_PTR(A7, PERST)); ++FUNC_GROUP_DECL(PCIERC0, A7); ++ ++#define D7 257 ++SIG_EXPR_LIST_DECL_SESG(D7, RCRST, PCIERC1, SIG_DESC_SET(SCU040, 19), ++ SIG_DESC_SET(SCU500, 24)); ++PIN_DECL_(D7, SIG_EXPR_LIST_PTR(D7, RCRST)); ++FUNC_GROUP_DECL(PCIERC1, D7); ++ + /* Pins, groups and functions are sort(1):ed alphabetically for sanity */ + + static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = { +@@ -1658,6 +1774,7 @@ + ASPEED_PINCTRL_PIN(A3), + ASPEED_PINCTRL_PIN(A4), + ASPEED_PINCTRL_PIN(A6), ++ ASPEED_PINCTRL_PIN(A7), + ASPEED_PINCTRL_PIN(AA11), + ASPEED_PINCTRL_PIN(AA12), + ASPEED_PINCTRL_PIN(AA16), +@@ -1806,6 +1923,7 @@ + ASPEED_PINCTRL_PIN(D4), + ASPEED_PINCTRL_PIN(D5), + ASPEED_PINCTRL_PIN(D6), ++ ASPEED_PINCTRL_PIN(D7), + ASPEED_PINCTRL_PIN(E1), + ASPEED_PINCTRL_PIN(E11), + ASPEED_PINCTRL_PIN(E12), +@@ -1921,6 +2039,7 @@ + ASPEED_PINCTRL_GROUP(FSI2), + ASPEED_PINCTRL_GROUP(FWSPIABR), + ASPEED_PINCTRL_GROUP(FWSPID), ++ ASPEED_PINCTRL_GROUP(FWQSPID), + ASPEED_PINCTRL_GROUP(FWQSPI), + ASPEED_PINCTRL_GROUP(FWSPIWP), + ASPEED_PINCTRL_GROUP(GPIT0), +@@ -1958,6 +2077,16 @@ + ASPEED_PINCTRL_GROUP(I2C7), + ASPEED_PINCTRL_GROUP(I2C8), + ASPEED_PINCTRL_GROUP(I2C9), ++ ASPEED_PINCTRL_GROUP(SI2C1), ++ ASPEED_PINCTRL_GROUP(SI2C2), ++ ASPEED_PINCTRL_GROUP(SI2C3), ++ ASPEED_PINCTRL_GROUP(SI2C4), ++ ASPEED_PINCTRL_GROUP(SI2C5), ++ ASPEED_PINCTRL_GROUP(SI2C6), ++ ASPEED_PINCTRL_GROUP(SI2C7), ++ ASPEED_PINCTRL_GROUP(SI2C8), ++ ASPEED_PINCTRL_GROUP(SI2C9), ++ ASPEED_PINCTRL_GROUP(SI2C10), + ASPEED_PINCTRL_GROUP(I3C1), + ASPEED_PINCTRL_GROUP(I3C2), + ASPEED_PINCTRL_GROUP(I3C3), +@@ -2073,6 +2202,8 @@ + ASPEED_PINCTRL_GROUP(SALT9G1), + ASPEED_PINCTRL_GROUP(SD1), + ASPEED_PINCTRL_GROUP(SD2), ++ ASPEED_PINCTRL_GROUP(PCIERC0), ++ ASPEED_PINCTRL_GROUP(PCIERC1), + ASPEED_PINCTRL_GROUP(EMMCG1), + ASPEED_PINCTRL_GROUP(EMMCG4), + ASPEED_PINCTRL_GROUP(EMMCG8), +@@ -2132,6 +2263,7 @@ + ASPEED_PINCTRL_GROUP(USBA), + ASPEED_PINCTRL_GROUP(USBB), + ASPEED_PINCTRL_GROUP(VB), ++ ASPEED_PINCTRL_GROUP(VPA), + ASPEED_PINCTRL_GROUP(VGAHS), + ASPEED_PINCTRL_GROUP(VGAVS), + ASPEED_PINCTRL_GROUP(WDTRST1), +@@ -2200,6 +2332,16 @@ + ASPEED_PINCTRL_FUNC(I2C7), + ASPEED_PINCTRL_FUNC(I2C8), + ASPEED_PINCTRL_FUNC(I2C9), ++ ASPEED_PINCTRL_FUNC(SI2C1), ++ ASPEED_PINCTRL_FUNC(SI2C2), ++ ASPEED_PINCTRL_FUNC(SI2C3), ++ ASPEED_PINCTRL_FUNC(SI2C4), 
++ ASPEED_PINCTRL_FUNC(SI2C5), ++ ASPEED_PINCTRL_FUNC(SI2C6), ++ ASPEED_PINCTRL_FUNC(SI2C7), ++ ASPEED_PINCTRL_FUNC(SI2C8), ++ ASPEED_PINCTRL_FUNC(SI2C9), ++ ASPEED_PINCTRL_FUNC(SI2C10), + ASPEED_PINCTRL_FUNC(I3C1), + ASPEED_PINCTRL_FUNC(I3C2), + ASPEED_PINCTRL_FUNC(I3C3), +@@ -2314,6 +2456,8 @@ + ASPEED_PINCTRL_FUNC(SPI2), + ASPEED_PINCTRL_FUNC(SPI2CS1), + ASPEED_PINCTRL_FUNC(SPI2CS2), ++ ASPEED_PINCTRL_FUNC(PCIERC0), ++ ASPEED_PINCTRL_FUNC(PCIERC1), + ASPEED_PINCTRL_FUNC(TACH0), + ASPEED_PINCTRL_FUNC(TACH1), + ASPEED_PINCTRL_FUNC(TACH10), +@@ -2354,6 +2498,7 @@ + ASPEED_PINCTRL_FUNC(USB2BD), + ASPEED_PINCTRL_FUNC(USB2BH), + ASPEED_PINCTRL_FUNC(VB), ++ ASPEED_PINCTRL_FUNC(VPA), + ASPEED_PINCTRL_FUNC(VGAHS), + ASPEED_PINCTRL_FUNC(VGAVS), + ASPEED_PINCTRL_FUNC(WDTRST1), +@@ -2607,6 +2752,10 @@ + { PIN_CONFIG_DRIVE_STRENGTH, { AB8, AB8 }, SCU454, GENMASK(27, 26)}, + /* LAD0 */ + { PIN_CONFIG_DRIVE_STRENGTH, { AB7, AB7 }, SCU454, GENMASK(25, 24)}, ++ /* GPIOF */ ++ { PIN_CONFIG_DRIVE_STRENGTH, { D22, A23 }, SCU458, GENMASK(9, 8)}, ++ /* GPIOG */ ++ { PIN_CONFIG_DRIVE_STRENGTH, { E21, B21 }, SCU458, GENMASK(11, 10)}, + + /* MAC3 */ + { PIN_CONFIG_POWER_SOURCE, { H24, E26 }, SCU458, BIT_MASK(4)}, +@@ -2615,6 +2764,11 @@ + { PIN_CONFIG_POWER_SOURCE, { F24, B24 }, SCU458, BIT_MASK(5)}, + { PIN_CONFIG_DRIVE_STRENGTH, { F24, B24 }, SCU458, GENMASK(3, 2)}, + ++ /* GPIOJ */ ++ { PIN_CONFIG_DRIVE_STRENGTH, { B20, A20 }, SCU650, BIT_MASK(12)}, ++ { PIN_CONFIG_DRIVE_STRENGTH, { E19, D20 }, SCU650, BIT_MASK(13)}, ++ { PIN_CONFIG_DRIVE_STRENGTH, { C19, A19 }, SCU650, BIT_MASK(14)}, ++ { PIN_CONFIG_DRIVE_STRENGTH, { C20, D19 }, SCU650, BIT_MASK(15)}, + /* GPIO18E */ + ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, Y1, Y4, SCU40C, 4), + ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, Y1, Y4, SCU40C, 4), +diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g7-ltpi.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g7-ltpi.c +--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g7-ltpi.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g7-ltpi.c 2025-12-23 10:16:21.136032468 +0000 +@@ -0,0 +1,1156 @@ ++// SPDX-License-Identifier: GPL-2.0 ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "pinctrl-aspeed.h" ++ ++#define SCU3B0 0x3B0 /* USB Controller Register */ ++#define SCU3B4 0x3B4 /* USB Controller Lock Register */ ++#define SCU3B8 0x3B8 /* USB Controller Secure Register #1 */ ++#define SCU3BC 0x3BC /* USB Controller Secure Register #2 */ ++#define SCU3C0 0x3C0 /* USB Controller Secure Register #3 */ ++#define SCU400 0x400 /* Multi-function Pin Control #1 */ ++#define SCU404 0x404 /* Multi-function Pin Control #2 */ ++#define SCU408 0x408 /* Multi-function Pin Control #3 */ ++#define SCU40C 0x40C /* Multi-function Pin Control #4 */ ++#define SCU410 0x410 /* Multi-function Pin Control #5 */ ++#define SCU414 0x414 /* Multi-function Pin Control #6 */ ++#define SCU418 0x418 /* Multi-function Pin Control #7 */ ++#define SCU41C 0x41C /* Multi-function Pin Control #8 */ ++#define SCU420 0x420 /* Multi-function Pin Control #9 */ ++#define SCU424 0x424 /* Multi-function Pin Control #10 */ ++#define SCU428 0x428 /* Multi-function Pin Control #11 */ ++#define SCU42C 0x42C /* Multi-function Pin Control #12 */ ++#define SCU430 0x430 /* Multi-function Pin Control #13 */ ++#define SCU434 0x434 /* Multi-function Pin Control #14 */ ++#define SCU438 0x438 /* Multi-function Pin Control #15 */ 
++#define SCU43C 0x43C /* Multi-function Pin Control #16 */ ++#define SCU440 0x440 /* Multi-function Pin Control #17 */ ++#define SCU444 0x444 /* Multi-function Pin Control #18 */ ++#define SCU448 0x448 /* Multi-function Pin Control #19 */ ++#define SCU44C 0x44C /* Multi-function Pin Control #20 */ ++#define SCU450 0x450 /* Multi-function Pin Control #21 */ ++#define SCU454 0x454 /* Multi-function Pin Control #22 */ ++#define SCU458 0x458 /* Multi-function Pin Control #23 */ ++#define SCU45C 0x45C /* Multi-function Pin Control #24 */ ++#define SCU460 0x460 /* Multi-function Pin Control #25 */ ++#define SCU464 0x464 /* Multi-function Pin Control #26 */ ++#define SCU468 0x468 /* Multi-function Pin Control #27 */ ++#define SCU46C 0x46C /* Multi-function Pin Control #28 */ ++#define SCU470 0x470 /* Multi-function Pin Control #29 */ ++#define SCU474 0x474 /* Multi-function Pin Control #30 */ ++#define SCU478 0x478 /* Multi-function Pin Control #31 */ ++#define SCU47C 0x47C ++#define SCU4A0 0x4A0 /* Voltage Selection */ ++#define SCU4C0 0x4C0 /* Driving Strength #0 A-I */ ++#define SCU4C4 0x4C4 /* Driving Strength #1 J-K */ ++#define SCU4C8 0x4C8 /* Driving Strength #2 L-M */ ++#define SCU4CC 0x4CC /* Driving Strength #3 N-O */ ++#define SCU4D0 0x4D0 /* Driving Strength #4 P-Q */ ++#define SCU4D4 0x4D4 /* Driving Strength #5 R-S */ ++#define SCU4D8 0x4D8 /* Driving Strength #6 T-U */ ++#define SCU4DC 0x4DC /* Driving Strength #7 W */ ++ ++#define SCU908 0x908 /* PCIe RC PERST Pin Control */ ++ ++enum { ++ D6, ++ B7, ++ A7, ++ C6, ++ B6, ++ A6, ++ C5, ++ D5, ++ L15, ++ H17, ++ L16, ++ K15, ++ K16, ++ K14, ++ J14, ++ J16, ++ J15, ++ G17, ++ F17, ++ D17, ++ C17, ++ H16, ++ B17, ++ E17, ++ A16, ++ B16, ++ H14, ++ A15, ++ G16, ++ F15, ++ H15, ++ D16, ++ M2, ++ M1, ++ M3, ++ M4, ++ N1, ++ N2, ++ N3, ++ L5, ++ P2, ++ P1, ++ N4, ++ N5, ++ HOLE0, ++ P3, ++ R1, ++ P4, ++ J2, ++ J1, ++ K4, ++ K2, ++ K3, ++ K1, ++ L4, ++ L2, ++ L1, ++ L3, ++ A14, ++ B14, ++ HOLE1, ++ HOLE2, ++ HOLE3, ++ HOLE4, ++ U15, ++ U16, ++ T17, ++ P12, ++ R13, ++ T15, ++ T16, ++ R12, ++ R14, ++ R17, ++ R15, ++ P13, ++ R16, ++ P14, ++ P17, ++ P16, ++ P15, ++ N17, ++ N14, ++ M17, ++ N15, ++ N16, ++ L17, ++ K17, ++ J17, ++ M14, ++ M15, ++ M16, ++ A4, ++ B5, ++ A5, ++ B4, ++ A10, ++ B9, ++ C7, ++ E8, ++ E7, ++ B8, ++ A9, ++ A8, ++ HOLE5, ++ HOLE6, ++ HOLE7, ++ HOLE8, ++ HOLE9, ++ HOLE10, ++ HOLE11, ++ HOLE12, ++ E13, ++ C12, ++ D12, ++ E12, ++ D13, ++ HOLE13, ++ B12, ++ HOLE14, ++ D15, ++ B13, ++ C14, ++ C13, ++ D14, ++ F13, ++ E14, ++ HOLE15, ++ HOLE16, ++ HOLE17, ++ B1, ++ C2, ++ D3, ++ C1, ++ E5, ++ HOLE18, ++ C11, ++ D11, ++ HOLE19, ++ HOLE20, ++ C10, ++ D10, ++ HOLE21, ++ HOLE22, ++ HOLE23, ++ HOLE24, ++ D9, ++ E9, ++ C9, ++ D8, ++ C8, ++ B11, ++ B10, ++ A12, ++ A11, ++ D7, ++ HOLE25, ++ HOLE26, ++ HOLE27, ++ HOLE28, ++ G2, ++ G1, ++ G4, ++ H3, ++ H2, ++ H1, ++ J4, ++ J3, ++ C4, ++ A2, ++ C3, ++ B2, ++ B3, ++ A3, ++ F2, ++ F1, ++ D2, ++ D1, ++ F4, ++ E4, ++ E2, ++ E1, ++ F3, ++ G5, ++ T4, ++ T3, ++ R5, ++ R6, ++ T2, ++ T5, ++ U2, ++ R7, ++ P9, ++ U4, ++ U5, ++ U6, ++ T7, ++ U7, ++ R10, ++ R11, ++ C16, ++ F16, ++ B15, ++ G15, ++ G14, ++ F14, ++ E15, ++ C15, ++}; ++ ++GROUP_DECL(TACH0, L15); ++GROUP_DECL(TACH1, H17); ++GROUP_DECL(TACH2, L16); ++GROUP_DECL(TACH3, K15); ++GROUP_DECL(TACH4, K16); ++GROUP_DECL(TACH5, K14); ++GROUP_DECL(TACH6, J14); ++GROUP_DECL(TACH7, J16); ++GROUP_DECL(TACH8, J15); ++GROUP_DECL(TACH9, G17); ++GROUP_DECL(TACH10, F17); ++GROUP_DECL(TACH11, D17); ++GROUP_DECL(TACH12, C17); ++GROUP_DECL(TACH13, H16); 
++GROUP_DECL(TACH14, B17); ++GROUP_DECL(TACH15, E17); ++GROUP_DECL(PWM0, A16); ++GROUP_DECL(PWM1, B16); ++GROUP_DECL(PWM2, H14); ++GROUP_DECL(PWM3, A15); ++GROUP_DECL(PWM4, G16); ++GROUP_DECL(PWM5, F15); ++GROUP_DECL(PWM6, H15); ++GROUP_DECL(PWM7, D16); ++GROUP_DECL(PWM8, K4); ++GROUP_DECL(PWM9, K2); ++GROUP_DECL(PWM10, K3); ++GROUP_DECL(PWM11, K1); ++GROUP_DECL(PWM12, L4); ++GROUP_DECL(PWM13, L2); ++GROUP_DECL(PWM14, L1); ++GROUP_DECL(PWM15, L3); ++GROUP_DECL(ADC0, T4); ++GROUP_DECL(ADC1, T3); ++GROUP_DECL(ADC2, R5); ++GROUP_DECL(ADC3, R6); ++GROUP_DECL(ADC4, T2); ++GROUP_DECL(ADC5, T5); ++GROUP_DECL(ADC6, U2); ++GROUP_DECL(ADC7, R7); ++GROUP_DECL(ADC8, P9); ++GROUP_DECL(ADC9, U4); ++GROUP_DECL(ADC10, U5); ++GROUP_DECL(ADC11, U6); ++GROUP_DECL(ADC12, T7); ++GROUP_DECL(ADC13, U7); ++GROUP_DECL(ADC14, R10); ++GROUP_DECL(ADC15, R11); ++GROUP_DECL(SGPM1, F14, E15, A14, B14); ++GROUP_DECL(I2C0, G2, G1); ++GROUP_DECL(I2C1, G4, H3); ++GROUP_DECL(I2C2, H2, H1); ++GROUP_DECL(I2C3, J4, J3); ++GROUP_DECL(I2C4, C4, A2); ++GROUP_DECL(I2C5, C3, B2); ++GROUP_DECL(I2C6, B3, A3); ++GROUP_DECL(I2C7, F2, F1); ++GROUP_DECL(I2C8, D2, D1); ++GROUP_DECL(I2C9, F4, E4); ++GROUP_DECL(I2C10, E2, E1); ++GROUP_DECL(I2C11, F3, G5); ++GROUP_DECL(I2C12, J1, K2); ++GROUP_DECL(I2C13, K3, K1); ++GROUP_DECL(I2C14, L4, L2); ++GROUP_DECL(I2C15, L1, L3); ++GROUP_DECL(HVI3C12, U15, U16); ++GROUP_DECL(HVI3C13, T17, P12); ++GROUP_DECL(HVI3C14, R13, T15); ++GROUP_DECL(HVI3C15, T16, R12); ++GROUP_DECL(I3C4, R14, R17); ++GROUP_DECL(I3C5, R15, P13); ++GROUP_DECL(I3C6, R16, P14); ++GROUP_DECL(I3C7, P17, P16); ++GROUP_DECL(I3C8, P15, N17); ++GROUP_DECL(I3C9, N14, M17); ++GROUP_DECL(I3C10, N15, N16); ++GROUP_DECL(I3C11, L17, K17); ++GROUP_DECL(HVI3C0, J17, M14); ++GROUP_DECL(HVI3C1, M15, M16); ++GROUP_DECL(HVI3C2, A4, B5); ++GROUP_DECL(HVI3C3, A5, B4); ++GROUP_DECL(JTAGM, C2, D3, C1, E5); ++ ++static struct aspeed_pin_group aspeed_g7_ltpi_pingroups[] = { ++ ASPEED_PINCTRL_GROUP(TACH0), ++ ASPEED_PINCTRL_GROUP(TACH1), ++ ASPEED_PINCTRL_GROUP(TACH2), ++ ASPEED_PINCTRL_GROUP(TACH3), ++ ASPEED_PINCTRL_GROUP(TACH4), ++ ASPEED_PINCTRL_GROUP(TACH5), ++ ASPEED_PINCTRL_GROUP(TACH6), ++ ASPEED_PINCTRL_GROUP(TACH7), ++ ASPEED_PINCTRL_GROUP(TACH8), ++ ASPEED_PINCTRL_GROUP(TACH9), ++ ASPEED_PINCTRL_GROUP(TACH10), ++ ASPEED_PINCTRL_GROUP(TACH11), ++ ASPEED_PINCTRL_GROUP(TACH12), ++ ASPEED_PINCTRL_GROUP(TACH13), ++ ASPEED_PINCTRL_GROUP(TACH14), ++ ASPEED_PINCTRL_GROUP(TACH15), ++ ASPEED_PINCTRL_GROUP(PWM0), ++ ASPEED_PINCTRL_GROUP(PWM1), ++ ASPEED_PINCTRL_GROUP(PWM2), ++ ASPEED_PINCTRL_GROUP(PWM3), ++ ASPEED_PINCTRL_GROUP(PWM4), ++ ASPEED_PINCTRL_GROUP(PWM5), ++ ASPEED_PINCTRL_GROUP(PWM6), ++ ASPEED_PINCTRL_GROUP(PWM7), ++ ASPEED_PINCTRL_GROUP(PWM8), ++ ASPEED_PINCTRL_GROUP(PWM9), ++ ASPEED_PINCTRL_GROUP(PWM10), ++ ASPEED_PINCTRL_GROUP(PWM11), ++ ASPEED_PINCTRL_GROUP(PWM12), ++ ASPEED_PINCTRL_GROUP(PWM13), ++ ASPEED_PINCTRL_GROUP(PWM14), ++ ASPEED_PINCTRL_GROUP(PWM15), ++ ASPEED_PINCTRL_GROUP(ADC0), ++ ASPEED_PINCTRL_GROUP(ADC1), ++ ASPEED_PINCTRL_GROUP(ADC2), ++ ASPEED_PINCTRL_GROUP(ADC3), ++ ASPEED_PINCTRL_GROUP(ADC4), ++ ASPEED_PINCTRL_GROUP(ADC5), ++ ASPEED_PINCTRL_GROUP(ADC6), ++ ASPEED_PINCTRL_GROUP(ADC7), ++ ASPEED_PINCTRL_GROUP(ADC8), ++ ASPEED_PINCTRL_GROUP(ADC9), ++ ASPEED_PINCTRL_GROUP(ADC10), ++ ASPEED_PINCTRL_GROUP(ADC11), ++ ASPEED_PINCTRL_GROUP(ADC12), ++ ASPEED_PINCTRL_GROUP(ADC13), ++ ASPEED_PINCTRL_GROUP(ADC14), ++ ASPEED_PINCTRL_GROUP(ADC15), ++ ASPEED_PINCTRL_GROUP(I2C0), ++ ASPEED_PINCTRL_GROUP(I2C1), ++ 
ASPEED_PINCTRL_GROUP(I2C2), ++ ASPEED_PINCTRL_GROUP(I2C3), ++ ASPEED_PINCTRL_GROUP(I2C4), ++ ASPEED_PINCTRL_GROUP(I2C5), ++ ASPEED_PINCTRL_GROUP(I2C6), ++ ASPEED_PINCTRL_GROUP(I2C7), ++ ASPEED_PINCTRL_GROUP(I2C8), ++ ASPEED_PINCTRL_GROUP(I2C9), ++ ASPEED_PINCTRL_GROUP(I2C10), ++ ASPEED_PINCTRL_GROUP(I2C11), ++ ASPEED_PINCTRL_GROUP(I2C12), ++ ASPEED_PINCTRL_GROUP(I2C13), ++ ASPEED_PINCTRL_GROUP(I2C14), ++ ASPEED_PINCTRL_GROUP(I2C15), ++ ASPEED_PINCTRL_GROUP(HVI3C12), ++ ASPEED_PINCTRL_GROUP(HVI3C13), ++ ASPEED_PINCTRL_GROUP(HVI3C14), ++ ASPEED_PINCTRL_GROUP(HVI3C15), ++ ASPEED_PINCTRL_GROUP(I3C4), ++ ASPEED_PINCTRL_GROUP(I3C5), ++ ASPEED_PINCTRL_GROUP(I3C6), ++ ASPEED_PINCTRL_GROUP(I3C7), ++ ASPEED_PINCTRL_GROUP(I3C8), ++ ASPEED_PINCTRL_GROUP(I3C9), ++ ASPEED_PINCTRL_GROUP(I3C10), ++ ASPEED_PINCTRL_GROUP(I3C11), ++ ASPEED_PINCTRL_GROUP(HVI3C0), ++ ASPEED_PINCTRL_GROUP(HVI3C1), ++ ASPEED_PINCTRL_GROUP(HVI3C2), ++ ASPEED_PINCTRL_GROUP(HVI3C3), ++ ASPEED_PINCTRL_GROUP(JTAGM), ++ ASPEED_PINCTRL_GROUP(SGPM1), ++}; ++ ++FUNC_DECL_(TACH0, "TACH0"); ++FUNC_DECL_(TACH1, "TACH1"); ++FUNC_DECL_(TACH2, "TACH2"); ++FUNC_DECL_(TACH3, "TACH3"); ++FUNC_DECL_(TACH4, "TACH4"); ++FUNC_DECL_(TACH5, "TACH5"); ++FUNC_DECL_(TACH6, "TACH6"); ++FUNC_DECL_(TACH7, "TACH7"); ++FUNC_DECL_(TACH8, "TACH8"); ++FUNC_DECL_(TACH9, "TACH9"); ++FUNC_DECL_(TACH10, "TACH10"); ++FUNC_DECL_(TACH11, "TACH11"); ++FUNC_DECL_(TACH12, "TACH12"); ++FUNC_DECL_(TACH13, "TACH13"); ++FUNC_DECL_(TACH14, "TACH14"); ++FUNC_DECL_(TACH15, "TACH15"); ++FUNC_DECL_(PWM0, "PWM0"); ++FUNC_DECL_(PWM1, "PWM1"); ++FUNC_DECL_(PWM2, "PWM2"); ++FUNC_DECL_(PWM3, "PWM3"); ++FUNC_DECL_(PWM4, "PWM4"); ++FUNC_DECL_(PWM5, "PWM5"); ++FUNC_DECL_(PWM6, "PWM6"); ++FUNC_DECL_(PWM7, "PWM7"); ++FUNC_DECL_(PWM8, "PWM8"); ++FUNC_DECL_(PWM9, "PWM9"); ++FUNC_DECL_(PWM10, "PWM10"); ++FUNC_DECL_(PWM11, "PWM11"); ++FUNC_DECL_(PWM12, "PWM12"); ++FUNC_DECL_(PWM13, "PWM13"); ++FUNC_DECL_(PWM14, "PWM14"); ++FUNC_DECL_(PWM15, "PWM15"); ++FUNC_DECL_(ADC0, "ADC0"); ++FUNC_DECL_(ADC1, "ADC1"); ++FUNC_DECL_(ADC2, "ADC2"); ++FUNC_DECL_(ADC3, "ADC3"); ++FUNC_DECL_(ADC4, "ADC4"); ++FUNC_DECL_(ADC5, "ADC5"); ++FUNC_DECL_(ADC6, "ADC6"); ++FUNC_DECL_(ADC7, "ADC7"); ++FUNC_DECL_(ADC8, "ADC8"); ++FUNC_DECL_(ADC9, "ADC9"); ++FUNC_DECL_(ADC10, "ADC10"); ++FUNC_DECL_(ADC11, "ADC11"); ++FUNC_DECL_(ADC12, "ADC12"); ++FUNC_DECL_(ADC13, "ADC13"); ++FUNC_DECL_(ADC14, "ADC14"); ++FUNC_DECL_(ADC15, "ADC15"); ++FUNC_DECL_(I2C0, "I2C0"); ++FUNC_DECL_(I2C1, "I2C1"); ++FUNC_DECL_(I2C2, "I2C2"); ++FUNC_DECL_(I2C3, "I2C3"); ++FUNC_DECL_(I2C4, "I2C4"); ++FUNC_DECL_(I2C5, "I2C5"); ++FUNC_DECL_(I2C6, "I2C6"); ++FUNC_DECL_(I2C7, "I2C7"); ++FUNC_DECL_(I2C8, "I2C8"); ++FUNC_DECL_(I2C9, "I2C9"); ++FUNC_DECL_(I2C10, "I2C10"); ++FUNC_DECL_(I2C11, "I2C11"); ++FUNC_DECL_(I2C12, "I2C12"); ++FUNC_DECL_(I2C13, "I2C13"); ++FUNC_DECL_(I2C14, "I2C14"); ++FUNC_DECL_(I2C15, "I2C15"); ++FUNC_DECL_(I3C12, "HVI3C12"); ++FUNC_DECL_(I3C13, "HVI3C13"); ++FUNC_DECL_(I3C14, "HVI3C14"); ++FUNC_DECL_(I3C15, "HVI3C15"); ++FUNC_DECL_(I3C4, "I3C4"); ++FUNC_DECL_(I3C5, "I3C5"); ++FUNC_DECL_(I3C6, "I3C6"); ++FUNC_DECL_(I3C7, "I3C7"); ++FUNC_DECL_(I3C8, "I3C8"); ++FUNC_DECL_(I3C9, "I3C9"); ++FUNC_DECL_(I3C10, "I3C10"); ++FUNC_DECL_(I3C11, "I3C11"); ++FUNC_DECL_(I3C0, "HVI3C0"); ++FUNC_DECL_(I3C1, "HVI3C1"); ++FUNC_DECL_(I3C2, "HVI3C2"); ++FUNC_DECL_(I3C3, "HVI3C3"); ++FUNC_DECL_(JTAGM, "JTAGM"); ++FUNC_DECL_(SGPM1, "SGPM1"); ++ ++static struct aspeed_pin_function aspeed_g7_ltpi_funcs[] = { ++ ASPEED_PINCTRL_FUNC(TACH0), ++ 
ASPEED_PINCTRL_FUNC(TACH1), ++ ASPEED_PINCTRL_FUNC(TACH2), ++ ASPEED_PINCTRL_FUNC(TACH3), ++ ASPEED_PINCTRL_FUNC(TACH4), ++ ASPEED_PINCTRL_FUNC(TACH5), ++ ASPEED_PINCTRL_FUNC(TACH6), ++ ASPEED_PINCTRL_FUNC(TACH7), ++ ASPEED_PINCTRL_FUNC(TACH8), ++ ASPEED_PINCTRL_FUNC(TACH9), ++ ASPEED_PINCTRL_FUNC(TACH10), ++ ASPEED_PINCTRL_FUNC(TACH11), ++ ASPEED_PINCTRL_FUNC(TACH12), ++ ASPEED_PINCTRL_FUNC(TACH13), ++ ASPEED_PINCTRL_FUNC(TACH14), ++ ASPEED_PINCTRL_FUNC(TACH15), ++ ASPEED_PINCTRL_FUNC(PWM0), ++ ASPEED_PINCTRL_FUNC(PWM1), ++ ASPEED_PINCTRL_FUNC(PWM2), ++ ASPEED_PINCTRL_FUNC(PWM3), ++ ASPEED_PINCTRL_FUNC(PWM4), ++ ASPEED_PINCTRL_FUNC(PWM5), ++ ASPEED_PINCTRL_FUNC(PWM6), ++ ASPEED_PINCTRL_FUNC(PWM7), ++ ASPEED_PINCTRL_FUNC(PWM8), ++ ASPEED_PINCTRL_FUNC(PWM9), ++ ASPEED_PINCTRL_FUNC(PWM10), ++ ASPEED_PINCTRL_FUNC(PWM11), ++ ASPEED_PINCTRL_FUNC(PWM12), ++ ASPEED_PINCTRL_FUNC(PWM13), ++ ASPEED_PINCTRL_FUNC(PWM14), ++ ASPEED_PINCTRL_FUNC(PWM15), ++ ASPEED_PINCTRL_FUNC(ADC0), ++ ASPEED_PINCTRL_FUNC(ADC1), ++ ASPEED_PINCTRL_FUNC(ADC2), ++ ASPEED_PINCTRL_FUNC(ADC3), ++ ASPEED_PINCTRL_FUNC(ADC4), ++ ASPEED_PINCTRL_FUNC(ADC5), ++ ASPEED_PINCTRL_FUNC(ADC6), ++ ASPEED_PINCTRL_FUNC(ADC7), ++ ASPEED_PINCTRL_FUNC(ADC8), ++ ASPEED_PINCTRL_FUNC(ADC9), ++ ASPEED_PINCTRL_FUNC(ADC10), ++ ASPEED_PINCTRL_FUNC(ADC11), ++ ASPEED_PINCTRL_FUNC(ADC12), ++ ASPEED_PINCTRL_FUNC(ADC13), ++ ASPEED_PINCTRL_FUNC(ADC14), ++ ASPEED_PINCTRL_FUNC(ADC15), ++ ASPEED_PINCTRL_FUNC(I2C0), ++ ASPEED_PINCTRL_FUNC(I2C1), ++ ASPEED_PINCTRL_FUNC(I2C2), ++ ASPEED_PINCTRL_FUNC(I2C3), ++ ASPEED_PINCTRL_FUNC(I2C4), ++ ASPEED_PINCTRL_FUNC(I2C5), ++ ASPEED_PINCTRL_FUNC(I2C6), ++ ASPEED_PINCTRL_FUNC(I2C7), ++ ASPEED_PINCTRL_FUNC(I2C8), ++ ASPEED_PINCTRL_FUNC(I2C9), ++ ASPEED_PINCTRL_FUNC(I2C10), ++ ASPEED_PINCTRL_FUNC(I2C11), ++ ASPEED_PINCTRL_FUNC(I2C12), ++ ASPEED_PINCTRL_FUNC(I2C13), ++ ASPEED_PINCTRL_FUNC(I2C14), ++ ASPEED_PINCTRL_FUNC(I2C15), ++ ASPEED_PINCTRL_FUNC(I3C12), ++ ASPEED_PINCTRL_FUNC(I3C13), ++ ASPEED_PINCTRL_FUNC(I3C14), ++ ASPEED_PINCTRL_FUNC(I3C15), ++ ASPEED_PINCTRL_FUNC(I3C4), ++ ASPEED_PINCTRL_FUNC(I3C5), ++ ASPEED_PINCTRL_FUNC(I3C6), ++ ASPEED_PINCTRL_FUNC(I3C7), ++ ASPEED_PINCTRL_FUNC(I3C8), ++ ASPEED_PINCTRL_FUNC(I3C9), ++ ASPEED_PINCTRL_FUNC(I3C10), ++ ASPEED_PINCTRL_FUNC(I3C11), ++ ASPEED_PINCTRL_FUNC(I3C0), ++ ASPEED_PINCTRL_FUNC(I3C1), ++ ASPEED_PINCTRL_FUNC(I3C2), ++ ASPEED_PINCTRL_FUNC(I3C3), ++ ASPEED_PINCTRL_FUNC(JTAGM), ++ ASPEED_PINCTRL_FUNC(SGPM1), ++}; ++ ++/* number, name, drv_data */ ++static const struct pinctrl_pin_desc aspeed_g7_ltpi_pins[] = { ++ PINCTRL_PIN(D6, "D6"), ++ PINCTRL_PIN(B7, "B7"), ++ PINCTRL_PIN(A7, "A7"), ++ PINCTRL_PIN(C6, "C6"), ++ PINCTRL_PIN(B6, "B6"), ++ PINCTRL_PIN(A6, "A6"), ++ PINCTRL_PIN(C5, "C5"), ++ PINCTRL_PIN(D5, "D5"), ++ PINCTRL_PIN(L15, "L15"), ++ PINCTRL_PIN(H17, "H17"), ++ PINCTRL_PIN(L16, "L16"), ++ PINCTRL_PIN(K15, "K15"), ++ PINCTRL_PIN(K16, "K16"), ++ PINCTRL_PIN(K14, "K14"), ++ PINCTRL_PIN(J14, "J14"), ++ PINCTRL_PIN(J16, "J16"), ++ PINCTRL_PIN(J15, "J15"), ++ PINCTRL_PIN(G17, "G17"), ++ PINCTRL_PIN(F17, "F17"), ++ PINCTRL_PIN(D17, "D17"), ++ PINCTRL_PIN(C17, "C17"), ++ PINCTRL_PIN(H16, "H16"), ++ PINCTRL_PIN(B17, "B17"), ++ PINCTRL_PIN(E17, "E17"), ++ PINCTRL_PIN(A16, "A16"), ++ PINCTRL_PIN(B16, "B16"), ++ PINCTRL_PIN(H14, "H14"), ++ PINCTRL_PIN(A15, "A15"), ++ PINCTRL_PIN(G16, "G16"), ++ PINCTRL_PIN(F15, "F15"), ++ PINCTRL_PIN(H15, "H15"), ++ PINCTRL_PIN(D16, "D16"), ++ PINCTRL_PIN(M2, "M2"), ++ PINCTRL_PIN(M1, "M1"), ++ PINCTRL_PIN(M3, "M3"), ++ PINCTRL_PIN(M4, 
"M4"), ++ PINCTRL_PIN(N1, "N1"), ++ PINCTRL_PIN(N2, "N2"), ++ PINCTRL_PIN(N3, "N3"), ++ PINCTRL_PIN(L5, "L5"), ++ PINCTRL_PIN(P2, "P2"), ++ PINCTRL_PIN(P1, "P1"), ++ PINCTRL_PIN(N4, "N4"), ++ PINCTRL_PIN(N5, "N5"), ++ PINCTRL_PIN(HOLE0, "HOLE0"), ++ PINCTRL_PIN(P3, "P3"), ++ PINCTRL_PIN(R1, "R1"), ++ PINCTRL_PIN(P4, "P4"), ++ PINCTRL_PIN(J2, "J2"), ++ PINCTRL_PIN(J1, "J1"), ++ PINCTRL_PIN(K4, "K4"), ++ PINCTRL_PIN(K2, "K2"), ++ PINCTRL_PIN(K3, "K3"), ++ PINCTRL_PIN(K1, "K1"), ++ PINCTRL_PIN(L4, "L4"), ++ PINCTRL_PIN(L2, "L2"), ++ PINCTRL_PIN(L1, "L1"), ++ PINCTRL_PIN(L3, "L3"), ++ PINCTRL_PIN(A14, "A14"), ++ PINCTRL_PIN(B14, "B14"), ++ PINCTRL_PIN(HOLE1, "HOLE1"), ++ PINCTRL_PIN(HOLE2, "HOLE2"), ++ PINCTRL_PIN(HOLE3, "HOLE3"), ++ PINCTRL_PIN(HOLE4, "HOLE4"), ++ PINCTRL_PIN(U15, "U15"), ++ PINCTRL_PIN(U16, "U16"), ++ PINCTRL_PIN(T17, "T17"), ++ PINCTRL_PIN(P12, "P12"), ++ PINCTRL_PIN(R13, "R13"), ++ PINCTRL_PIN(T15, "T15"), ++ PINCTRL_PIN(T16, "T16"), ++ PINCTRL_PIN(R12, "R12"), ++ PINCTRL_PIN(R14, "R14"), ++ PINCTRL_PIN(R17, "R17"), ++ PINCTRL_PIN(R15, "R15"), ++ PINCTRL_PIN(P13, "P13"), ++ PINCTRL_PIN(R16, "R16"), ++ PINCTRL_PIN(P14, "P14"), ++ PINCTRL_PIN(P17, "P17"), ++ PINCTRL_PIN(P16, "P16"), ++ PINCTRL_PIN(P15, "P15"), ++ PINCTRL_PIN(N17, "N17"), ++ PINCTRL_PIN(N14, "N14"), ++ PINCTRL_PIN(M17, "M17"), ++ PINCTRL_PIN(N15, "N15"), ++ PINCTRL_PIN(N16, "N16"), ++ PINCTRL_PIN(L17, "L17"), ++ PINCTRL_PIN(K17, "K17"), ++ PINCTRL_PIN(J17, "J17"), ++ PINCTRL_PIN(M14, "M14"), ++ PINCTRL_PIN(M15, "M15"), ++ PINCTRL_PIN(M16, "M16"), ++ PINCTRL_PIN(A4, "A4"), ++ PINCTRL_PIN(B5, "B5"), ++ PINCTRL_PIN(A5, "A5"), ++ PINCTRL_PIN(B4, "B4"), ++ PINCTRL_PIN(A10, "A10"), ++ PINCTRL_PIN(B9, "B9"), ++ PINCTRL_PIN(C7, "C7"), ++ PINCTRL_PIN(E8, "E8"), ++ PINCTRL_PIN(E7, "E7"), ++ PINCTRL_PIN(B8, "B8"), ++ PINCTRL_PIN(A9, "A9"), ++ PINCTRL_PIN(A8, "A8"), ++ PINCTRL_PIN(HOLE5, "HOLE5"), ++ PINCTRL_PIN(HOLE6, "HOLE6"), ++ PINCTRL_PIN(HOLE7, "HOLE7"), ++ PINCTRL_PIN(HOLE8, "HOLE8"), ++ PINCTRL_PIN(HOLE9, "HOLE9"), ++ PINCTRL_PIN(HOLE10, "HOLE10"), ++ PINCTRL_PIN(HOLE11, "HOLE11"), ++ PINCTRL_PIN(HOLE12, "HOLE12"), ++ PINCTRL_PIN(E13, "E13"), ++ PINCTRL_PIN(C12, "C12"), ++ PINCTRL_PIN(D12, "D12"), ++ PINCTRL_PIN(E12, "E12"), ++ PINCTRL_PIN(D13, "D13"), ++ PINCTRL_PIN(HOLE13, "HOLE13"), ++ PINCTRL_PIN(B12, "B12"), ++ PINCTRL_PIN(HOLE14, "HOLE14"), ++ PINCTRL_PIN(D15, "D15"), ++ PINCTRL_PIN(B13, "B13"), ++ PINCTRL_PIN(C14, "C14"), ++ PINCTRL_PIN(C13, "C13"), ++ PINCTRL_PIN(D14, "D14"), ++ PINCTRL_PIN(F13, "F13"), ++ PINCTRL_PIN(E14, "E14"), ++ PINCTRL_PIN(HOLE15, "HOLE15"), ++ PINCTRL_PIN(HOLE16, "HOLE16"), ++ PINCTRL_PIN(HOLE17, "HOLE17"), ++ PINCTRL_PIN(B1, "B1"), ++ PINCTRL_PIN(C2, "C2"), ++ PINCTRL_PIN(D3, "D3"), ++ PINCTRL_PIN(C1, "C1"), ++ PINCTRL_PIN(E5, "E5"), ++ PINCTRL_PIN(HOLE18, "HOLE18"), ++ PINCTRL_PIN(C11, "C11"), ++ PINCTRL_PIN(D11, "D11"), ++ PINCTRL_PIN(HOLE19, "HOLE19"), ++ PINCTRL_PIN(HOLE20, "HOLE20"), ++ PINCTRL_PIN(C10, "C10"), ++ PINCTRL_PIN(D10, "D10"), ++ PINCTRL_PIN(HOLE21, "HOLE21"), ++ PINCTRL_PIN(HOLE22, "HOLE22"), ++ PINCTRL_PIN(HOLE23, "HOLE23"), ++ PINCTRL_PIN(HOLE24, "HOLE24"), ++ PINCTRL_PIN(D9, "D9"), ++ PINCTRL_PIN(E9, "E9"), ++ PINCTRL_PIN(C9, "C9"), ++ PINCTRL_PIN(D8, "D8"), ++ PINCTRL_PIN(C8, "C8"), ++ PINCTRL_PIN(B11, "B11"), ++ PINCTRL_PIN(B10, "B10"), ++ PINCTRL_PIN(A12, "A12"), ++ PINCTRL_PIN(A11, "A11"), ++ PINCTRL_PIN(D7, "D7"), ++ PINCTRL_PIN(HOLE25, "HOLE25"), ++ PINCTRL_PIN(HOLE26, "HOLE26"), ++ PINCTRL_PIN(HOLE27, "HOLE27"), ++ PINCTRL_PIN(HOLE28, "HOLE28"), ++ 
PINCTRL_PIN(G2, "G2"), ++ PINCTRL_PIN(G1, "G1"), ++ PINCTRL_PIN(G4, "G4"), ++ PINCTRL_PIN(H3, "H3"), ++ PINCTRL_PIN(H2, "H2"), ++ PINCTRL_PIN(H1, "H1"), ++ PINCTRL_PIN(J4, "J4"), ++ PINCTRL_PIN(J3, "J3"), ++ PINCTRL_PIN(C4, "C4"), ++ PINCTRL_PIN(A2, "A2"), ++ PINCTRL_PIN(C3, "C3"), ++ PINCTRL_PIN(B2, "B2"), ++ PINCTRL_PIN(B3, "B3"), ++ PINCTRL_PIN(A3, "A3"), ++ PINCTRL_PIN(F2, "F2"), ++ PINCTRL_PIN(F1, "F1"), ++ PINCTRL_PIN(D2, "D2"), ++ PINCTRL_PIN(D1, "D1"), ++ PINCTRL_PIN(F4, "F4"), ++ PINCTRL_PIN(E4, "E4"), ++ PINCTRL_PIN(E2, "E2"), ++ PINCTRL_PIN(E1, "E1"), ++ PINCTRL_PIN(F3, "F3"), ++ PINCTRL_PIN(G5, "G5"), ++ PINCTRL_PIN(T4, "T4"), ++ PINCTRL_PIN(T3, "T3"), ++ PINCTRL_PIN(R5, "R5"), ++ PINCTRL_PIN(R6, "R6"), ++ PINCTRL_PIN(T2, "T2"), ++ PINCTRL_PIN(T5, "T5"), ++ PINCTRL_PIN(U2, "U2"), ++ PINCTRL_PIN(R7, "R7"), ++ PINCTRL_PIN(P9, "P9"), ++ PINCTRL_PIN(U4, "U4"), ++ PINCTRL_PIN(U5, "U5"), ++ PINCTRL_PIN(U6, "U6"), ++ PINCTRL_PIN(T7, "T7"), ++ PINCTRL_PIN(U7, "U7"), ++ PINCTRL_PIN(R10, "R10"), ++ PINCTRL_PIN(R11, "R11"), ++ PINCTRL_PIN(C16, "C16"), ++ PINCTRL_PIN(F16, "F16"), ++ PINCTRL_PIN(B15, "B15"), ++ PINCTRL_PIN(G15, "G15"), ++ PINCTRL_PIN(G14, "G14"), ++ PINCTRL_PIN(F14, "F14"), ++ PINCTRL_PIN(E15, "E15"), ++ PINCTRL_PIN(C15, "C15"), ++}; ++ ++FUNCFG_DESCL(L15, PIN_CFG(TACH0, SCU404, GENMASK(2, 0), 1)); ++FUNCFG_DESCL(H17, PIN_CFG(TACH1, SCU404, GENMASK(6, 4), (1 << 4))); ++FUNCFG_DESCL(L16, PIN_CFG(TACH2, SCU404, GENMASK(10, 8), (1 << 8))); ++FUNCFG_DESCL(K15, PIN_CFG(TACH3, SCU404, GENMASK(14, 12), (1 << 12))); ++FUNCFG_DESCL(K16, PIN_CFG(TACH4, SCU404, GENMASK(18, 16), (1 << 16))); ++FUNCFG_DESCL(K14, PIN_CFG(TACH5, SCU404, GENMASK(22, 20), (1 << 20))); ++FUNCFG_DESCL(J14, PIN_CFG(TACH6, SCU404, GENMASK(26, 24), (1 << 24))); ++FUNCFG_DESCL(J16, PIN_CFG(TACH7, SCU404, GENMASK(30, 28), (1 << 28))); ++FUNCFG_DESCL(J15, PIN_CFG(TACH8, SCU408, GENMASK(2, 0), 1)); ++FUNCFG_DESCL(G17, PIN_CFG(TACH9, SCU408, GENMASK(6, 4), (1 << 4))); ++FUNCFG_DESCL(F17, PIN_CFG(TACH10, SCU408, GENMASK(10, 8), (1 << 8))); ++FUNCFG_DESCL(D17, PIN_CFG(TACH11, SCU408, GENMASK(14, 12), (1 << 12))); ++FUNCFG_DESCL(C17, PIN_CFG(TACH12, SCU408, GENMASK(18, 16), (1 << 16))); ++FUNCFG_DESCL(H16, PIN_CFG(TACH13, SCU408, GENMASK(22, 20), (1 << 20))); ++FUNCFG_DESCL(B17, PIN_CFG(TACH14, SCU408, GENMASK(26, 24), (1 << 24))); ++FUNCFG_DESCL(E17, PIN_CFG(TACH15, SCU408, GENMASK(30, 28), (1 << 28))); ++FUNCFG_DESCL(A16, PIN_CFG(PWM0, SCU40C, GENMASK(2, 0), 1)); ++FUNCFG_DESCL(B16, PIN_CFG(PWM1, SCU40C, GENMASK(6, 4), (1 << 4))); ++FUNCFG_DESCL(H14, PIN_CFG(PWM2, SCU40C, GENMASK(10, 8), (1 << 8))); ++FUNCFG_DESCL(A15, PIN_CFG(PWM3, SCU40C, GENMASK(14, 12), (1 << 12))); ++FUNCFG_DESCL(G16, PIN_CFG(PWM4, SCU40C, GENMASK(18, 16), (1 << 16))); ++FUNCFG_DESCL(F15, PIN_CFG(PWM5, SCU40C, GENMASK(22, 20), (1 << 20))); ++FUNCFG_DESCL(H15, PIN_CFG(PWM6, SCU40C, GENMASK(26, 24), (1 << 24))); ++FUNCFG_DESCL(D16, PIN_CFG(PWM7, SCU40C, GENMASK(30, 28), (1 << 28))); ++FUNCFG_DESCL(J1, PIN_CFG(I2C12, SCU418, GENMASK(6, 4), (4 << 4))); ++FUNCFG_DESCL(K4, PIN_CFG(PWM8, SCU418, GENMASK(10, 8), (3 << 8))); ++FUNCFG_DESCL(K2, PIN_CFG(PWM9, SCU418, GENMASK(14, 12), (3 << 12)), ++ PIN_CFG(I2C12, SCU418, GENMASK(14, 12), (4 << 12))); ++FUNCFG_DESCL(K3, PIN_CFG(PWM10, SCU418, GENMASK(18, 16), (3 << 16)), ++ PIN_CFG(I2C13, SCU418, GENMASK(18, 16), (4 << 16))); ++FUNCFG_DESCL(K1, PIN_CFG(PWM11, SCU418, GENMASK(22, 20), (3 << 20)), ++ PIN_CFG(I2C13, SCU418, GENMASK(22, 20), (4 << 20))); ++FUNCFG_DESCL(L4, PIN_CFG(PWM12, SCU418, GENMASK(26, 24), 
(3 << 24)), ++ PIN_CFG(I2C14, SCU418, GENMASK(26, 24), (4 << 24))); ++FUNCFG_DESCL(L2, PIN_CFG(PWM13, SCU418, GENMASK(30, 28), (3 << 28)), ++ PIN_CFG(I2C14, SCU418, GENMASK(30, 28), (4 << 28))); ++FUNCFG_DESCL(L1, PIN_CFG(I2C15, SCU41C, GENMASK(2, 0), 2), ++ PIN_CFG(PWM14, SCU41C, GENMASK(2, 0), 3)); ++FUNCFG_DESCL(L3, PIN_CFG(I2C15, SCU41C, GENMASK(6, 4), (2 << 4)), ++ PIN_CFG(PWM15, SCU41C, GENMASK(6, 4), (3 << 4))); ++FUNCFG_DESCL(A14, PIN_CFG(SGPM1, SCU41C, GENMASK(10, 8), (1 << 8))); ++FUNCFG_DESCL(B14, PIN_CFG(SGPM1, SCU41C, GENMASK(14, 12), (1 << 12))); ++FUNCFG_DESCL(U15, PIN_CFG(HVI3C12, SCU420, GENMASK(2, 0), 1)); ++FUNCFG_DESCL(U16, PIN_CFG(HVI3C12, SCU420, GENMASK(6, 4), (1 << 4))); ++FUNCFG_DESCL(T17, PIN_CFG(HVI3C13, SCU420, GENMASK(10, 8), (1 << 8))); ++FUNCFG_DESCL(P12, PIN_CFG(HVI3C13, SCU420, GENMASK(14, 12), (1 << 12))); ++FUNCFG_DESCL(R13, PIN_CFG(HVI3C14, SCU420, GENMASK(18, 16), (1 << 16))); ++FUNCFG_DESCL(T15, PIN_CFG(HVI3C14, SCU420, GENMASK(22, 20), (1 << 20))); ++FUNCFG_DESCL(T16, PIN_CFG(HVI3C15, SCU420, GENMASK(26, 24), (1 << 24))); ++FUNCFG_DESCL(R12, PIN_CFG(HVI3C15, SCU420, GENMASK(30, 28), (1 << 28))); ++FUNCFG_DESCL(R14, PIN_CFG(I3C4, SCU424, GENMASK(2, 0), 1)); ++FUNCFG_DESCL(R17, PIN_CFG(I3C4, SCU424, GENMASK(6, 4), (1 << 4))); ++FUNCFG_DESCL(R15, PIN_CFG(I3C5, SCU424, GENMASK(10, 8), (1 << 8))); ++FUNCFG_DESCL(P13, PIN_CFG(I3C5, SCU424, GENMASK(14, 12), (1 << 12))); ++FUNCFG_DESCL(R16, PIN_CFG(I3C6, SCU424, GENMASK(18, 16), (1 << 16))); ++FUNCFG_DESCL(P14, PIN_CFG(I3C6, SCU424, GENMASK(22, 20), (1 << 20))); ++FUNCFG_DESCL(P17, PIN_CFG(I3C7, SCU424, GENMASK(26, 24), (1 << 24))); ++FUNCFG_DESCL(P16, PIN_CFG(I3C7, SCU424, GENMASK(30, 28), (1 << 28))); ++FUNCFG_DESCL(P15, PIN_CFG(I3C8, SCU428, GENMASK(2, 0), 1)); ++FUNCFG_DESCL(N17, PIN_CFG(I3C8, SCU428, GENMASK(6, 4), (1 << 4))); ++FUNCFG_DESCL(N14, PIN_CFG(I3C9, SCU428, GENMASK(10, 8), (1 << 8))); ++FUNCFG_DESCL(M17, PIN_CFG(I3C9, SCU428, GENMASK(14, 12), (1 << 12))); ++FUNCFG_DESCL(N15, PIN_CFG(I3C10, SCU428, GENMASK(18, 16), (1 << 16))); ++FUNCFG_DESCL(N16, PIN_CFG(I3C10, SCU428, GENMASK(22, 20), (1 << 20))); ++FUNCFG_DESCL(L17, PIN_CFG(I3C11, SCU428, GENMASK(26, 24), (1 << 24))); ++FUNCFG_DESCL(K17, PIN_CFG(I3C11, SCU428, GENMASK(30, 28), (1 << 28))); ++FUNCFG_DESCL(J17, PIN_CFG(HVI3C0, SCU42C, GENMASK(2, 0), 1)); ++FUNCFG_DESCL(M14, PIN_CFG(HVI3C0, SCU42C, GENMASK(6, 4), (1 << 4))); ++FUNCFG_DESCL(M15, PIN_CFG(HVI3C1, SCU42C, GENMASK(10, 8), (1 << 8))); ++FUNCFG_DESCL(M16, PIN_CFG(HVI3C1, SCU42C, GENMASK(14, 12), (1 << 12))); ++FUNCFG_DESCL(A4, PIN_CFG(HVI3C2, SCU42C, GENMASK(18, 16), (1 << 16))); ++FUNCFG_DESCL(B5, PIN_CFG(HVI3C2, SCU42C, GENMASK(22, 20), (1 << 20))); ++FUNCFG_DESCL(A5, PIN_CFG(HVI3C3, SCU42C, GENMASK(26, 24), (1 << 24))); ++FUNCFG_DESCL(B4, PIN_CFG(HVI3C3, SCU42C, GENMASK(30, 28), (1 << 28))); ++FUNCFG_DESCL(C2, PIN_CFG(JTAGM, SCU440, GENMASK(14, 12), (1 << 12))); ++FUNCFG_DESCL(D3, PIN_CFG(JTAGM, SCU440, GENMASK(18, 16), (1 << 16))); ++FUNCFG_DESCL(C1, PIN_CFG(JTAGM, SCU440, GENMASK(22, 20), (1 << 20))); ++FUNCFG_DESCL(E5, PIN_CFG(JTAGM, SCU440, GENMASK(26, 24), (1 << 24))); ++FUNCFG_DESCL(G2, PIN_CFG(I2C0, SCU454, GENMASK(2, 0), 1)); ++FUNCFG_DESCL(G1, PIN_CFG(I2C0, SCU454, GENMASK(6, 4), (1 << 4))); ++FUNCFG_DESCL(G4, PIN_CFG(I2C1, SCU454, GENMASK(10, 8), (1 << 8))); ++FUNCFG_DESCL(H3, PIN_CFG(I2C1, SCU454, GENMASK(14, 12), (1 << 12))); ++FUNCFG_DESCL(H2, PIN_CFG(I2C2, SCU454, GENMASK(18, 16), (1 << 16))); ++FUNCFG_DESCL(H1, PIN_CFG(I2C2, SCU454, GENMASK(22, 20), (1 << 20))); 
++FUNCFG_DESCL(J4, PIN_CFG(I2C3, SCU454, GENMASK(26, 24), (1 << 24))); ++FUNCFG_DESCL(J3, PIN_CFG(I2C3, SCU454, GENMASK(30, 28), (1 << 28))); ++FUNCFG_DESCL(C4, PIN_CFG(I2C4, SCU458, GENMASK(2, 0), 1)); ++FUNCFG_DESCL(A2, PIN_CFG(I2C4, SCU458, GENMASK(6, 4), (1 << 4))); ++FUNCFG_DESCL(C3, PIN_CFG(I2C5, SCU458, GENMASK(10, 8), (1 << 8))); ++FUNCFG_DESCL(B2, PIN_CFG(I2C5, SCU458, GENMASK(14, 12), (1 << 12))); ++FUNCFG_DESCL(B3, PIN_CFG(I2C6, SCU458, GENMASK(18, 16), (1 << 16))); ++FUNCFG_DESCL(A3, PIN_CFG(I2C6, SCU458, GENMASK(22, 20), (1 << 20))); ++FUNCFG_DESCL(F2, PIN_CFG(I2C7, SCU458, GENMASK(26, 24), (1 << 24))); ++FUNCFG_DESCL(F1, PIN_CFG(I2C7, SCU458, GENMASK(30, 28), (1 << 28))); ++FUNCFG_DESCL(D2, PIN_CFG(I2C8, SCU45C, GENMASK(2, 0), 1)); ++FUNCFG_DESCL(D1, PIN_CFG(I2C8, SCU45C, GENMASK(6, 4), (1 << 4))); ++FUNCFG_DESCL(F4, PIN_CFG(I2C9, SCU45C, GENMASK(10, 8), (1 << 8))); ++FUNCFG_DESCL(E4, PIN_CFG(I2C9, SCU45C, GENMASK(14, 12), (1 << 12))); ++FUNCFG_DESCL(E2, PIN_CFG(I2C10, SCU45C, GENMASK(18, 16), (1 << 16))); ++FUNCFG_DESCL(E1, PIN_CFG(I2C10, SCU45C, GENMASK(22, 20), (1 << 20))); ++FUNCFG_DESCL(F3, PIN_CFG(I2C11, SCU45C, GENMASK(26, 24), (1 << 24))); ++FUNCFG_DESCL(G5, PIN_CFG(I2C11, SCU45C, GENMASK(30, 28), (1 << 28))); ++FUNCFG_DESCL(T4, PIN_CFG(ADC0, SCU460, GENMASK(2, 0), 0)); ++FUNCFG_DESCL(T3, PIN_CFG(ADC1, SCU460, GENMASK(6, 4), 0)); ++FUNCFG_DESCL(R5, PIN_CFG(ADC2, SCU460, GENMASK(10, 8), 0)); ++FUNCFG_DESCL(R6, PIN_CFG(ADC3, SCU460, GENMASK(14, 12), 0)); ++FUNCFG_DESCL(T2, PIN_CFG(ADC4, SCU460, GENMASK(18, 16), 0)); ++FUNCFG_DESCL(T5, PIN_CFG(ADC5, SCU460, GENMASK(22, 20), 0)); ++FUNCFG_DESCL(U2, PIN_CFG(ADC6, SCU460, GENMASK(26, 24), 0)); ++FUNCFG_DESCL(R7, PIN_CFG(ADC7, SCU460, GENMASK(30, 28), 0)); ++FUNCFG_DESCL(P9, PIN_CFG(ADC8, SCU464, GENMASK(2, 0), 0)); ++FUNCFG_DESCL(U4, PIN_CFG(ADC9, SCU464, GENMASK(6, 4), 0)); ++FUNCFG_DESCL(U5, PIN_CFG(ADC10, SCU464, GENMASK(10, 8), 0)); ++FUNCFG_DESCL(U6, PIN_CFG(ADC11, SCU464, GENMASK(14, 12), 0)); ++FUNCFG_DESCL(T7, PIN_CFG(ADC12, SCU464, GENMASK(18, 16), 0)); ++FUNCFG_DESCL(U7, PIN_CFG(ADC13, SCU464, GENMASK(22, 20), 0)); ++FUNCFG_DESCL(R10, PIN_CFG(ADC14, SCU464, GENMASK(26, 24), 0)); ++FUNCFG_DESCL(R11, PIN_CFG(ADC15, SCU464, GENMASK(30, 28), 0)); ++FUNCFG_DESCL(F14, PIN_CFG(SGPM1, SCU468, GENMASK(22, 20), (1 << 20))); ++FUNCFG_DESCL(E15, PIN_CFG(SGPM1, SCU468, GENMASK(26, 24), (1 << 24))); ++ ++static const struct aspeed_g7_pincfg pin_cfg[] = { ++ PINCFG_PIN(L15), ++ PINCFG_PIN(H17), ++ PINCFG_PIN(L16), ++ PINCFG_PIN(K15), ++ PINCFG_PIN(K16), ++ PINCFG_PIN(K14), ++ PINCFG_PIN(J14), ++ PINCFG_PIN(J16), ++ PINCFG_PIN(J15), ++ PINCFG_PIN(G17), ++ PINCFG_PIN(F17), ++ PINCFG_PIN(D17), ++ PINCFG_PIN(C17), ++ PINCFG_PIN(H16), ++ PINCFG_PIN(B17), ++ PINCFG_PIN(E17), ++ PINCFG_PIN(A16), ++ PINCFG_PIN(B16), ++ PINCFG_PIN(H14), ++ PINCFG_PIN(A15), ++ PINCFG_PIN(G16), ++ PINCFG_PIN(F15), ++ PINCFG_PIN(H15), ++ PINCFG_PIN(D16), ++ PINCFG_PIN(J1), ++ PINCFG_PIN(K4), ++ PINCFG_PIN(K2), ++ PINCFG_PIN(K3), ++ PINCFG_PIN(K1), ++ PINCFG_PIN(L4), ++ PINCFG_PIN(L2), ++ PINCFG_PIN(L1), ++ PINCFG_PIN(L3), ++ PINCFG_PIN(A14), ++ PINCFG_PIN(B14), ++ PINCFG_PIN(U15), ++ PINCFG_PIN(U16), ++ PINCFG_PIN(T17), ++ PINCFG_PIN(P12), ++ PINCFG_PIN(R13), ++ PINCFG_PIN(T15), ++ PINCFG_PIN(T16), ++ PINCFG_PIN(R12), ++ PINCFG_PIN(R14), ++ PINCFG_PIN(R17), ++ PINCFG_PIN(R15), ++ PINCFG_PIN(P13), ++ PINCFG_PIN(R16), ++ PINCFG_PIN(P14), ++ PINCFG_PIN(P17), ++ PINCFG_PIN(P16), ++ PINCFG_PIN(P15), ++ PINCFG_PIN(N17), ++ PINCFG_PIN(N14), ++ PINCFG_PIN(M17), ++ 
PINCFG_PIN(N15), ++ PINCFG_PIN(N16), ++ PINCFG_PIN(L17), ++ PINCFG_PIN(K17), ++ PINCFG_PIN(J17), ++ PINCFG_PIN(M14), ++ PINCFG_PIN(M15), ++ PINCFG_PIN(M16), ++ PINCFG_PIN(A4), ++ PINCFG_PIN(B5), ++ PINCFG_PIN(A5), ++ PINCFG_PIN(B4), ++ PINCFG_PIN(C2), ++ PINCFG_PIN(D3), ++ PINCFG_PIN(C1), ++ PINCFG_PIN(E5), ++ PINCFG_PIN(G2), ++ PINCFG_PIN(G1), ++ PINCFG_PIN(G4), ++ PINCFG_PIN(H3), ++ PINCFG_PIN(H2), ++ PINCFG_PIN(H1), ++ PINCFG_PIN(J4), ++ PINCFG_PIN(J3), ++ PINCFG_PIN(C4), ++ PINCFG_PIN(A2), ++ PINCFG_PIN(C3), ++ PINCFG_PIN(B2), ++ PINCFG_PIN(B3), ++ PINCFG_PIN(A3), ++ PINCFG_PIN(F2), ++ PINCFG_PIN(F1), ++ PINCFG_PIN(D2), ++ PINCFG_PIN(D1), ++ PINCFG_PIN(F4), ++ PINCFG_PIN(E4), ++ PINCFG_PIN(E2), ++ PINCFG_PIN(E1), ++ PINCFG_PIN(F3), ++ PINCFG_PIN(G5), ++ PINCFG_PIN(T4), ++ PINCFG_PIN(T3), ++ PINCFG_PIN(R5), ++ PINCFG_PIN(R6), ++ PINCFG_PIN(T2), ++ PINCFG_PIN(T5), ++ PINCFG_PIN(U2), ++ PINCFG_PIN(R7), ++ PINCFG_PIN(P9), ++ PINCFG_PIN(U4), ++ PINCFG_PIN(U5), ++ PINCFG_PIN(U6), ++ PINCFG_PIN(T7), ++ PINCFG_PIN(U7), ++ PINCFG_PIN(R10), ++ PINCFG_PIN(R11), ++ PINCFG_PIN(F14), ++ PINCFG_PIN(E15), ++}; ++ ++static int aspeed_g7_ltpi_dt_node_to_map(struct pinctrl_dev *pctldev, ++ struct device_node *np_config, ++ struct pinctrl_map **map, u32 *num_maps) ++{ ++ return pinconf_generic_dt_node_to_map(pctldev, np_config, map, num_maps, ++ PIN_MAP_TYPE_INVALID); ++} ++ ++static void aspeed_g7_ltpi_dt_free_map(struct pinctrl_dev *pctldev, ++ struct pinctrl_map *map, u32 num_maps) ++{ ++ kfree(map); ++} ++ ++static const struct pinctrl_ops aspeed_g7_ltpi_pinctrl_ops = { ++ .get_groups_count = aspeed_pinctrl_get_groups_count, ++ .get_group_name = aspeed_pinctrl_get_group_name, ++ .get_group_pins = aspeed_pinctrl_get_group_pins, ++ .pin_dbg_show = aspeed_pinctrl_pin_dbg_show, ++ .dt_node_to_map = aspeed_g7_ltpi_dt_node_to_map, ++ .dt_free_map = aspeed_g7_ltpi_dt_free_map, ++}; ++ ++static const struct pinmux_ops aspeed_g7_ltpi_pinmux_ops = { ++ .get_functions_count = aspeed_pinmux_get_fn_count, ++ .get_function_name = aspeed_pinmux_get_fn_name, ++ .get_function_groups = aspeed_pinmux_get_fn_groups, ++ .set_mux = aspeed_g7_pinmux_set_mux, ++ .gpio_request_enable = aspeed_g7_gpio_request_enable, ++ .strict = true, ++}; ++ ++static const struct pinconf_ops aspeed_g7_ltpi_pinconf_ops = { ++ .is_generic = true, ++ .pin_config_get = aspeed_pin_config_get, ++ .pin_config_set = aspeed_pin_config_set, ++ .pin_config_group_get = aspeed_pin_config_group_get, ++ .pin_config_group_set = aspeed_pin_config_group_set, ++}; ++ ++/* pinctrl_desc */ ++static struct pinctrl_desc aspeed_g7_ltpi_pinctrl_desc = { ++ .name = "aspeed-g7-ltpi-pinctrl", ++ .pins = aspeed_g7_ltpi_pins, ++ .npins = ARRAY_SIZE(aspeed_g7_ltpi_pins), ++ .pctlops = &aspeed_g7_ltpi_pinctrl_ops, ++ .pmxops = &aspeed_g7_ltpi_pinmux_ops, ++ .confops = &aspeed_g7_ltpi_pinconf_ops, ++ .owner = THIS_MODULE, ++}; ++ ++static int aspeed_g7_ltpi_pinctrl_probe(struct platform_device *pdev) ++{ ++ struct aspeed_pinctrl_data *ltpi_pinctrl_data; ++ ++ ltpi_pinctrl_data = devm_kzalloc(&pdev->dev, sizeof(*ltpi_pinctrl_data), ++ GFP_KERNEL); ++ if (!ltpi_pinctrl_data) ++ return -ENOMEM; ++ ++ ltpi_pinctrl_data->pins = aspeed_g7_ltpi_pins; ++ ltpi_pinctrl_data->npins = ARRAY_SIZE(aspeed_g7_ltpi_pins); ++ ltpi_pinctrl_data->pinmux.groups = aspeed_g7_ltpi_pingroups; ++ ltpi_pinctrl_data->pinmux.ngroups = ARRAY_SIZE(aspeed_g7_ltpi_pingroups); ++ ltpi_pinctrl_data->pinmux.functions = aspeed_g7_ltpi_funcs; ++ ltpi_pinctrl_data->pinmux.nfunctions = 
ARRAY_SIZE(aspeed_g7_ltpi_funcs); ++ ltpi_pinctrl_data->pinmux.configs_g7 = pin_cfg; ++ ltpi_pinctrl_data->pinmux.nconfigs_g7 = ARRAY_SIZE(pin_cfg); ++ ++ return aspeed_pinctrl_probe(pdev, &aspeed_g7_ltpi_pinctrl_desc, ltpi_pinctrl_data); ++} ++ ++static const struct of_device_id aspeed_g7_ltpi_pinctrl_match[] = { ++ { .compatible = "aspeed,ast1700-pinctrl" }, ++ {} ++}; ++MODULE_DEVICE_TABLE(of, aspeed_g7_ltpi_pinctrl_match); ++ ++static struct platform_driver aspeed_g7_ltpi_pinctrl_driver = { ++ .probe = aspeed_g7_ltpi_pinctrl_probe, ++ .driver = { ++ .name = "aspeed-g7-ltpi-pinctrl", ++ .of_match_table = aspeed_g7_ltpi_pinctrl_match, ++ .suppress_bind_attrs = true, ++ }, ++}; ++ ++static int __init aspeed_g7_ltpi_pinctrl_register(void) ++{ ++ return platform_driver_register(&aspeed_g7_ltpi_pinctrl_driver); ++} ++arch_initcall(aspeed_g7_ltpi_pinctrl_register); +diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g7-soc0.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g7-soc0.c +--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g7-soc0.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g7-soc0.c 2025-12-23 10:16:21.136032468 +0000 +@@ -0,0 +1,503 @@ ++// SPDX-License-Identifier: GPL-2.0 ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "pinctrl-aspeed.h" ++#include "../pinctrl-utils.h" ++ ++#define SCU200 0x200 /* System Reset Control #1 */ ++ ++#define SCU400 0x400 /* Multi-function Pin Control #1 */ ++#define SCU404 0x404 /* Multi-function Pin Control #2 */ ++#define SCU408 0x408 /* Multi-function Pin Control #3 */ ++#define SCU40C 0x40C /* Multi-function Pin Control #3 */ ++#define SCU410 0x410 /* USB Multi-function Control Register */ ++#define SCU414 0x414 /* VGA Function Control Register */ ++ ++#define SCU480 0x480 /* GPIO18A0 IO Control Register */ ++#define SCU484 0x484 /* GPIO18A1 IO Control Register */ ++#define SCU488 0x488 /* GPIO18A2 IO Control Register */ ++#define SCU48C 0x48c /* GPIO18A3 IO Control Register */ ++#define SCU490 0x490 /* GPIO18A4 IO Control Register */ ++#define SCU494 0x494 /* GPIO18A5 IO Control Register */ ++#define SCU498 0x498 /* GPIO18A6 IO Control Register */ ++#define SCU49C 0x49c /* GPIO18A7 IO Control Register */ ++#define SCU4A0 0x4A0 /* GPIO18B0 IO Control Register */ ++#define SCU4A4 0x4A4 /* GPIO18B1 IO Control Register */ ++#define SCU4A8 0x4A8 /* GPIO18B2 IO Control Register */ ++#define SCU4AC 0x4AC /* GPIO18B3 IO Control Register */ ++ ++enum { ++ AC14, ++ AE15, ++ AD14, ++ AE14, ++ AF14, ++ AB13, ++ AB14, ++ AF15, ++ AF13, ++ AC13, ++ AD13, ++ AE13, ++ PORTA_U3, // SCU410[1:0] ++ PORTA_U2, // SCU410[3:2] ++ PORTB_U3, // SCU410[5:4] ++ PORTB_U2, // SCU410[7:6] ++ PORTA_U3_XHCI, // SCU410[9] ++ PORTA_U2_XHCI, // SCU410[9] ++ PORTB_U3_XHCI, // SCU410[10] ++ PORTB_U2_XHCI, // SCU410[10] ++ PORTA_MODE, // SCU410[25:24] ++ PORTB_MODE, // SCU410[29:28] ++ PORTA_U2_PHY, ++ PORTA_U3_PHY, ++ PORTB_U2_PHY, ++ PORTB_U3_PHY, ++ JTAG_PORT, ++ PCIERC0_PERST, ++ PCIERC1_PERST, ++}; ++ ++GROUP_DECL(EMMCG1, AC14, AE15, AD14); ++GROUP_DECL(EMMCG4, AC14, AE15, AD14, AE14, AF14, AB13); ++GROUP_DECL(EMMCG8, AC14, AE15, AD14, AE14, AF14, AB13, AF13, AC13, AD13, AE13); ++GROUP_DECL(EMMCWPN, AF15); ++GROUP_DECL(EMMCCDN, AB14); ++GROUP_DECL(VGADDC, AD13, AE13); ++GROUP_DECL(VB1, AC14, AE15, AD14, AE14); ++GROUP_DECL(VB0, AF15, AB14, AF13, AC13); ++//USB3A ++//xhci: BMC/PCIE, vHub/PHY/EXT port 
++GROUP_DECL(USB3AXHD, PORTA_U3, PORTA_U3_XHCI); ++GROUP_DECL(USB3AXHPD, PORTA_U3, PORTA_U3_XHCI); ++GROUP_DECL(USB3AXH, PORTA_U3, PORTA_U3_XHCI, PORTA_U3_PHY); ++GROUP_DECL(USB3AXHP, PORTA_U3, PORTA_U3_XHCI, PORTA_U3_PHY); ++GROUP_DECL(USB3AXH2B, PORTA_U3, PORTA_U3_XHCI, PORTB_U3_PHY); ++GROUP_DECL(USB3AXHP2B, PORTA_U3, PORTA_U3_XHCI, PORTB_U3_PHY); ++ ++//USB2A ++//xhci: BMC/PCIE, vHub/PHY/EXT port ++GROUP_DECL(USB2AXHD1, PORTA_U2, PORTA_U2_XHCI); ++GROUP_DECL(USB2AXHPD1, PORTA_U2, PORTA_U2_XHCI); ++GROUP_DECL(USB2AXH, PORTA_U2, PORTA_U2_XHCI, PORTA_U2_PHY); ++GROUP_DECL(USB2AXHP, PORTA_U2, PORTA_U2_XHCI, PORTA_U2_PHY); ++GROUP_DECL(USB2AXH2B, PORTA_U2, PORTA_U2_XHCI, PORTB_U2_PHY); ++GROUP_DECL(USB2AXHP2B, PORTA_U2, PORTA_U2_XHCI, PORTB_U2_PHY); ++// vhub to phy ++GROUP_DECL(USB2AD1, PORTA_U2, PORTA_U2_PHY); ++//ehci ++GROUP_DECL(USB2AHPD0, PORTA_MODE); ++GROUP_DECL(USB2AH, PORTA_MODE, PORTA_U2_PHY); ++GROUP_DECL(USB2AHP, PORTA_MODE, PORTA_U2_PHY); ++GROUP_DECL(USB2AD0, PORTA_MODE, PORTA_U2_PHY); ++ ++//USB3B ++//xhci: BMC/PCIE, vHub/PHY/EXT port ++GROUP_DECL(USB3BXHD, PORTB_U3, PORTB_U3_XHCI); ++GROUP_DECL(USB3BXHPD, PORTB_U3, PORTB_U3_XHCI); ++GROUP_DECL(USB3BXH, PORTB_U3, PORTB_U3_XHCI, PORTB_U3_PHY); ++GROUP_DECL(USB3BXHP, PORTB_U3, PORTB_U3_XHCI, PORTB_U3_PHY); ++GROUP_DECL(USB3BXH2A, PORTB_U3, PORTB_U3_XHCI, PORTA_U3_PHY); ++GROUP_DECL(USB3BXHP2A, PORTB_U3, PORTB_U3_XHCI, PORTA_U3_PHY); ++ ++//USB2B ++//xhci: BMC/PCIE, vHub/PHY/EXT port ++GROUP_DECL(USB2BXHD1, PORTB_U2, PORTB_U2_XHCI); ++GROUP_DECL(USB2BXHPD1, PORTB_U2, PORTB_U2_XHCI); ++GROUP_DECL(USB2BXH, PORTB_U2, PORTB_U2_XHCI, PORTB_U2_PHY); ++GROUP_DECL(USB2BXHP, PORTB_U2, PORTB_U2_XHCI, PORTB_U2_PHY); ++GROUP_DECL(USB2BXH2A, PORTB_U2, PORTB_U2_XHCI, PORTA_U2_PHY); ++GROUP_DECL(USB2BXHP2A, PORTB_U2, PORTB_U2_XHCI, PORTA_U2_PHY); ++// vhub to phy ++GROUP_DECL(USB2BD1, PORTB_U2, PORTB_U2_PHY); ++//ehci ++GROUP_DECL(USB2BHPD0, PORTB_MODE); ++GROUP_DECL(USB2BH, PORTB_MODE, PORTB_U2_PHY); ++GROUP_DECL(USB2BHP, PORTB_MODE, PORTB_U2_PHY); ++GROUP_DECL(USB2BD0, PORTB_MODE, PORTB_U2_PHY); ++//JTAG port ++GROUP_DECL(PSP, JTAG_PORT); ++GROUP_DECL(SSP, JTAG_PORT); ++GROUP_DECL(TSP, JTAG_PORT); ++GROUP_DECL(DDR, JTAG_PORT); ++GROUP_DECL(USB3A, JTAG_PORT); ++GROUP_DECL(USB3B, JTAG_PORT); ++GROUP_DECL(PCIEA, JTAG_PORT); ++GROUP_DECL(PCIEB, JTAG_PORT); ++GROUP_DECL(JTAGM0, JTAG_PORT); ++//PCIE RC PERST ++GROUP_DECL(PCIERC0PERST, PCIERC0_PERST); ++GROUP_DECL(PCIERC1PERST, PCIERC1_PERST); ++ ++static struct aspeed_pin_group aspeed_g7_soc0_pingroups[] = { ++ ASPEED_PINCTRL_GROUP(EMMCG1), ++ ASPEED_PINCTRL_GROUP(EMMCG4), ++ ASPEED_PINCTRL_GROUP(EMMCG8), ++ ASPEED_PINCTRL_GROUP(EMMCWPN), ++ ASPEED_PINCTRL_GROUP(EMMCCDN), ++ ASPEED_PINCTRL_GROUP(VGADDC), ++ ASPEED_PINCTRL_GROUP(VB1), ++ ASPEED_PINCTRL_GROUP(VB0), ++ ASPEED_PINCTRL_GROUP(USB3AXHD), ++ ASPEED_PINCTRL_GROUP(USB3AXHPD), ++ ASPEED_PINCTRL_GROUP(USB3AXH), ++ ASPEED_PINCTRL_GROUP(USB3AXHP), ++ ASPEED_PINCTRL_GROUP(USB3AXH2B), ++ ASPEED_PINCTRL_GROUP(USB3AXHP2B), ++ ASPEED_PINCTRL_GROUP(USB2AXHD1), ++ ASPEED_PINCTRL_GROUP(USB2AXHPD1), ++ ASPEED_PINCTRL_GROUP(USB2AXH), ++ ASPEED_PINCTRL_GROUP(USB2AXHP), ++ ASPEED_PINCTRL_GROUP(USB2AXH2B), ++ ASPEED_PINCTRL_GROUP(USB2AXHP2B), ++ ASPEED_PINCTRL_GROUP(USB2AD1), ++ ASPEED_PINCTRL_GROUP(USB2AHPD0), ++ ASPEED_PINCTRL_GROUP(USB2AH), ++ ASPEED_PINCTRL_GROUP(USB2AHP), ++ ASPEED_PINCTRL_GROUP(USB2AD0), ++ ASPEED_PINCTRL_GROUP(USB3BXHD), ++ ASPEED_PINCTRL_GROUP(USB3BXHPD), ++ ASPEED_PINCTRL_GROUP(USB3BXH), ++ ASPEED_PINCTRL_GROUP(USB3BXHP), ++ 
ASPEED_PINCTRL_GROUP(USB3BXH2A), ++ ASPEED_PINCTRL_GROUP(USB3BXHP2A), ++ ASPEED_PINCTRL_GROUP(USB2BXHD1), ++ ASPEED_PINCTRL_GROUP(USB2BXHPD1), ++ ASPEED_PINCTRL_GROUP(USB2BXH), ++ ASPEED_PINCTRL_GROUP(USB2BXHP), ++ ASPEED_PINCTRL_GROUP(USB2BXH2A), ++ ASPEED_PINCTRL_GROUP(USB2BXHP2A), ++ ASPEED_PINCTRL_GROUP(USB2BD1), ++ ASPEED_PINCTRL_GROUP(USB2BHPD0), ++ ASPEED_PINCTRL_GROUP(USB2BH), ++ ASPEED_PINCTRL_GROUP(USB2BHP), ++ ASPEED_PINCTRL_GROUP(USB2BD0), ++ ASPEED_PINCTRL_GROUP(PSP), ++ ASPEED_PINCTRL_GROUP(SSP), ++ ASPEED_PINCTRL_GROUP(TSP), ++ ASPEED_PINCTRL_GROUP(DDR), ++ ASPEED_PINCTRL_GROUP(USB3A), ++ ASPEED_PINCTRL_GROUP(USB3B), ++ ASPEED_PINCTRL_GROUP(PCIEA), ++ ASPEED_PINCTRL_GROUP(PCIEB), ++ ASPEED_PINCTRL_GROUP(JTAGM0), ++ ASPEED_PINCTRL_GROUP(PCIERC0PERST), ++ ASPEED_PINCTRL_GROUP(PCIERC1PERST), ++}; ++ ++FUNC_DECL_(EMMC, "EMMCG1", "EMMCG4", "EMMCG8", "EMMCWPN", "EMMCCDN"); ++FUNC_DECL_(VGADDC, "VGADDC"); ++FUNC_DECL_(VB, "VB0", "VB1"); ++FUNC_DECL_(USB3A, "USB3AXHD", "USB3AXHPD", "USB3AXH", "USB3AXHP", "USB3AXH2B", ++ "USB3AXHP2B"); ++FUNC_DECL_(USB2A, "USB2AXHD1", "USB2AXHPD1", "USB2AXH", "USB2AXHP", "USB2AXH2B", ++ "USB2AXHP2B", "USB2AD1", "USB2AHPD0", "USB2AH", "USB2AHP", ++ "USB2AD0"); ++FUNC_DECL_(USB3B, "USB3BXHD", "USB3BXHPD", "USB3BXH", "USB3BXHP", "USB3BXH2A", ++ "USB3BXHP2A"); ++FUNC_DECL_(USB2B, "USB2BXHD1", "USB2BXHPD1", "USB2BXH", "USB2BXHP", "USB2BXH2A", ++ "USB2BXHP2A", "USB2BD1", "USB2BHPD0", "USB2BH", "USB2BHP", ++ "USB2BD0"); ++FUNC_DECL_(JTAG0, "PSP", "SSP", "TSP", "DDR", "USB3A", "USB3B", ++ "PCIEA", "PCIEB", "JTAGM0"); ++FUNC_DECL_(PCIERC, "PCIERC0PERST", "PCIERC1PERST"); ++ ++static struct aspeed_pin_function aspeed_g7_soc0_funcs[] = { ++ ASPEED_PINCTRL_FUNC(EMMC), ++ ASPEED_PINCTRL_FUNC(VGADDC), ++ ASPEED_PINCTRL_FUNC(VB), ++ ASPEED_PINCTRL_FUNC(USB3A), ++ ASPEED_PINCTRL_FUNC(USB2A), ++ ASPEED_PINCTRL_FUNC(USB3B), ++ ASPEED_PINCTRL_FUNC(USB2B), ++ ASPEED_PINCTRL_FUNC(JTAG0), ++ ASPEED_PINCTRL_FUNC(PCIERC), ++}; ++ ++static const struct pinctrl_pin_desc aspeed_g7_soc0_pins[] = { ++ PINCTRL_PIN(AC14, "AC14"), ++ PINCTRL_PIN(AE15, "AE15"), ++ PINCTRL_PIN(AD14, "AD14"), ++ PINCTRL_PIN(AE14, "AE14"), ++ PINCTRL_PIN(AF14, "AF14"), ++ PINCTRL_PIN(AB13, "AB13"), ++ PINCTRL_PIN(AF15, "AF15"), ++ PINCTRL_PIN(AB14, "AB14"), ++ PINCTRL_PIN(AF13, "AF13"), ++ PINCTRL_PIN(AC13, "AC13"), ++ PINCTRL_PIN(AD13, "AD13"), ++ PINCTRL_PIN(AE13, "AE13"), ++ PINCTRL_PIN(PORTA_U3, "PORTA_U3"), ++ PINCTRL_PIN(PORTA_U2, "PORTA_U2"), ++ PINCTRL_PIN(PORTB_U3, "PORTB_U3"), ++ PINCTRL_PIN(PORTB_U2, "PORTB_U2"), ++ PINCTRL_PIN(PORTA_U3_XHCI, "PORTA_U3_XHCI"), ++ PINCTRL_PIN(PORTA_U2_XHCI, "PORTA_U2_XHCI"), ++ PINCTRL_PIN(PORTB_U3_XHCI, "PORTB_U3_XHCI"), ++ PINCTRL_PIN(PORTB_U2_XHCI, "PORTB_U2_XHCI"), ++ PINCTRL_PIN(PORTA_MODE, "PORTA_MODE"), ++ PINCTRL_PIN(PORTA_U3_PHY, "PORTA_U3_PHY"), ++ PINCTRL_PIN(PORTA_U2_PHY, "PORTA_U2_PHY"), ++ PINCTRL_PIN(PORTB_MODE, "PORTB_MODE"), ++ PINCTRL_PIN(PORTB_U3_PHY, "PORTB_U3_PHY"), ++ PINCTRL_PIN(PORTB_U2_PHY, "PORTB_U2_PHY"), ++ PINCTRL_PIN(JTAG_PORT, "JTAG_PORT"), ++ PINCTRL_PIN(PCIERC0_PERST, "PCIERC0_PERST"), ++ PINCTRL_PIN(PCIERC1_PERST, "PCIERC1_PERST"), ++}; ++ ++FUNCFG_DESCL(AC14, PIN_CFG(EMMCG1, SCU400, BIT_MASK(0), BIT(0)), ++ PIN_CFG(EMMCG4, SCU400, BIT_MASK(0), BIT(0)), ++ PIN_CFG(EMMCG8, SCU400, BIT_MASK(0), BIT(0)), ++ PIN_CFG(VB1, SCU404, BIT_MASK(0), BIT(0))); ++FUNCFG_DESCL(AE15, PIN_CFG(EMMCG1, SCU400, BIT_MASK(1), BIT(1)), ++ PIN_CFG(EMMCG4, SCU400, BIT_MASK(1), BIT(1)), ++ PIN_CFG(EMMCG8, SCU400, BIT_MASK(1), BIT(1)), ++ PIN_CFG(VB1, 
SCU404, BIT_MASK(1), BIT(1))); ++FUNCFG_DESCL(AD14, PIN_CFG(EMMCG1, SCU400, BIT_MASK(2), BIT(2)), ++ PIN_CFG(EMMCG4, SCU400, BIT_MASK(2), BIT(2)), ++ PIN_CFG(EMMCG8, SCU400, BIT_MASK(2), BIT(2)), ++ PIN_CFG(VB1, SCU404, BIT_MASK(2), BIT(2))); ++FUNCFG_DESCL(AE14, PIN_CFG(EMMCG4, SCU400, BIT_MASK(3), BIT(3)), ++ PIN_CFG(EMMCG8, SCU400, BIT_MASK(3), BIT(3)), ++ PIN_CFG(VB1, SCU404, BIT_MASK(3), BIT(3))); ++FUNCFG_DESCL(AF14, PIN_CFG(EMMCG4, SCU400, BIT_MASK(4), BIT(4)), ++ PIN_CFG(EMMCG8, SCU400, BIT_MASK(4), BIT(4))); ++FUNCFG_DESCL(AB13, PIN_CFG(EMMCG4, SCU400, BIT_MASK(5), BIT(5)), ++ PIN_CFG(EMMCG8, SCU400, BIT_MASK(5), BIT(5))); ++FUNCFG_DESCL(AB14, PIN_CFG(EMMCCDN, SCU400, BIT_MASK(6), BIT(6)), ++ PIN_CFG(VB0, SCU404, BIT_MASK(6), BIT(6))); ++FUNCFG_DESCL(AF15, PIN_CFG(EMMCWPN, SCU400, BIT_MASK(7), BIT(7)), ++ PIN_CFG(VB0, SCU404, BIT_MASK(7), BIT(7))); ++FUNCFG_DESCL(AF13, PIN_CFG(EMMCG8, SCU400, BIT_MASK(8), BIT(8)), ++ PIN_CFG(VB0, SCU404, BIT_MASK(8), BIT(8))); ++FUNCFG_DESCL(AC13, PIN_CFG(EMMCG8, SCU400, BIT_MASK(9), BIT(9)), ++ PIN_CFG(VB0, SCU404, BIT_MASK(9), BIT(9))); ++FUNCFG_DESCL(AD13, PIN_CFG(EMMCG8, SCU400, BIT_MASK(10), BIT(10)), ++ PIN_CFG(VGADDC, SCU404, BIT_MASK(10), BIT(10))); ++FUNCFG_DESCL(AE13, PIN_CFG(EMMCG8, SCU400, BIT_MASK(11), BIT(11)), ++ PIN_CFG(VGADDC, SCU404, BIT_MASK(11), BIT(11))); ++FUNCFG_DESCL(PORTA_U3, PIN_CFG(USB3AXHD, SCU410, GENMASK(1, 0), 0), ++ PIN_CFG(USB3AXHPD, SCU410, GENMASK(1, 0), 0), ++ PIN_CFG(USB3AXH, SCU410, GENMASK(1, 0), 2), ++ PIN_CFG(USB3AXHP, SCU410, GENMASK(1, 0), 2), ++ PIN_CFG(USB3AXH2B, SCU410, GENMASK(1, 0), 3), ++ PIN_CFG(USB3AXHP2B, SCU410, GENMASK(1, 0), 3)); ++FUNCFG_DESCL(PORTA_U2, PIN_CFG(USB2AXHD1, SCU410, GENMASK(3, 2), 0), ++ PIN_CFG(USB2AXHPD1, SCU410, GENMASK(3, 2), 0), ++ PIN_CFG(USB2AXH, SCU410, GENMASK(3, 2), 2 << 2), ++ PIN_CFG(USB2AXHP, SCU410, GENMASK(3, 2), 2 << 2), ++ PIN_CFG(USB2AXH2B, SCU410, GENMASK(3, 2), 3 << 2), ++ PIN_CFG(USB2AXHP2B, SCU410, GENMASK(3, 2), 3 << 2), ++ PIN_CFG(USB2AD1, SCU410, GENMASK(3, 2), 1 << 2)); ++FUNCFG_DESCL(PORTB_U3, PIN_CFG(USB3BXHD, SCU410, GENMASK(5, 4), 0), ++ PIN_CFG(USB3BXHPD, SCU410, GENMASK(5, 4), 0), ++ PIN_CFG(USB3BXH, SCU410, GENMASK(5, 4), 2 << 4), ++ PIN_CFG(USB3BXHP, SCU410, GENMASK(5, 4), 2 << 4), ++ PIN_CFG(USB3BXH2A, SCU410, GENMASK(5, 4), 3 << 4), ++ PIN_CFG(USB3BXHP2A, SCU410, GENMASK(5, 4), 3 << 4)); ++FUNCFG_DESCL(PORTB_U2, PIN_CFG(USB2BXHD1, SCU410, GENMASK(7, 6), 0), ++ PIN_CFG(USB2BXHPD1, SCU410, GENMASK(7, 6), 0), ++ PIN_CFG(USB2BXH, SCU410, GENMASK(7, 6), 2 << 6), ++ PIN_CFG(USB2BXHP, SCU410, GENMASK(7, 6), 2 << 6), ++ PIN_CFG(USB2BXH2A, SCU410, GENMASK(7, 6), 3 << 6), ++ PIN_CFG(USB2BXHP2A, SCU410, GENMASK(7, 6), 3 << 6), ++ PIN_CFG(USB2BD1, SCU410, GENMASK(7, 6), 1 << 6)); ++FUNCFG_DESCL(PORTA_U3_XHCI, PIN_CFG(USB3AXHD, SCU410, BIT_MASK(9), 1 << 9), ++ PIN_CFG(USB3AXHPD, SCU410, BIT_MASK(9), 0), ++ PIN_CFG(USB3AXH, SCU410, BIT_MASK(9), 1 << 9), ++ PIN_CFG(USB3AXHP, SCU410, BIT_MASK(9), 0), ++ PIN_CFG(USB3AXH2B, SCU410, BIT_MASK(9), 1 << 9), ++ PIN_CFG(USB3AXHP2B, SCU410, BIT_MASK(9), 0)); ++FUNCFG_DESCL(PORTA_U2_XHCI, PIN_CFG(USB2AXHD1, SCU410, BIT_MASK(9), 1 << 9), ++ PIN_CFG(USB2AXHPD1, SCU410, BIT_MASK(9), 0), ++ PIN_CFG(USB2AXH, SCU410, BIT_MASK(9), 1 << 9), ++ PIN_CFG(USB2AXHP, SCU410, BIT_MASK(9), 0), ++ PIN_CFG(USB2AXH2B, SCU410, BIT_MASK(9), 1 << 9), ++ PIN_CFG(USB2AXHP2B, SCU410, BIT_MASK(9), 0)); ++FUNCFG_DESCL(PORTB_U3_XHCI, PIN_CFG(USB3BXHD, SCU410, BIT_MASK(10), 1 << 10), ++ PIN_CFG(USB3BXHPD, SCU410, BIT_MASK(10), 0), ++ 
PIN_CFG(USB3BXH, SCU410, BIT_MASK(10), 1 << 10), ++ PIN_CFG(USB3BXHP, SCU410, BIT_MASK(10), 0), ++ PIN_CFG(USB3BXH2A, SCU410, BIT_MASK(10), 1 << 10), ++ PIN_CFG(USB3BXHP2A, SCU410, BIT_MASK(10), 0)); ++FUNCFG_DESCL(PORTB_U2_XHCI, PIN_CFG(USB2BXHD1, SCU410, BIT_MASK(10), 1 << 10), ++ PIN_CFG(USB2BXHPD1, SCU410, BIT_MASK(10), 0), ++ PIN_CFG(USB2BXH, SCU410, BIT_MASK(10), 1 << 10), ++ PIN_CFG(USB2BXHP, SCU410, BIT_MASK(10), 0), ++ PIN_CFG(USB2BXH2A, SCU410, BIT_MASK(10), 1 << 10), ++ PIN_CFG(USB2BXHP2A, SCU410, BIT_MASK(10), 0)); ++FUNCFG_DESCL(PORTA_MODE, PIN_CFG(USB2AHPD0, SCU410, GENMASK(25, 24), 0), ++ PIN_CFG(USB2AH, SCU410, GENMASK(25, 24), 2 << 24), ++ PIN_CFG(USB2AHP, SCU410, GENMASK(25, 24), 3 << 24), ++ PIN_CFG(USB2AD0, SCU410, GENMASK(25, 24), 1 << 24)); ++FUNCFG_DESCL(PORTB_MODE, PIN_CFG(USB2BHPD0, SCU410, GENMASK(29, 28), 0), ++ PIN_CFG(USB2BH, SCU410, GENMASK(29, 28), 2 << 28), ++ PIN_CFG(USB2BHP, SCU410, GENMASK(29, 28), 3 << 28), ++ PIN_CFG(USB2BD0, SCU410, GENMASK(29, 28), 1 << 28)); ++FUNCFG_DESCL(PORTA_U3_PHY); ++FUNCFG_DESCL(PORTA_U2_PHY); ++FUNCFG_DESCL(PORTB_U3_PHY); ++FUNCFG_DESCL(PORTB_U2_PHY); ++FUNCFG_DESCL(JTAG_PORT, PIN_CFG(PSP, SCU408, GENMASK(12, 5), 0x0 << 5), ++ PIN_CFG(SSP, SCU408, GENMASK(12, 5), 0x41 << 5), ++ PIN_CFG(TSP, SCU408, GENMASK(12, 5), 0x42 << 5), ++ PIN_CFG(DDR, SCU408, GENMASK(12, 5), 0x43 << 5), ++ PIN_CFG(USB3A, SCU408, GENMASK(12, 5), 0x44 << 5), ++ PIN_CFG(USB3B, SCU408, GENMASK(12, 5), 0x45 << 5), ++ PIN_CFG(PCIEA, SCU408, GENMASK(12, 5), 0x46 << 5), ++ PIN_CFG(PCIEB, SCU408, GENMASK(12, 5), 0x47 << 5), ++ PIN_CFG(JTAGM0, SCU408, GENMASK(12, 5), 0x8 << 5)); ++FUNCFG_DESCL(PCIERC0_PERST, PIN_CFG(PCIERC0PERST, SCU200, BIT_MASK(21), 1 << 21)); ++FUNCFG_DESCL(PCIERC1_PERST, PIN_CFG(PCIERC1PERST, SCU200, BIT_MASK(19), 1 << 19)); ++ ++static const struct aspeed_g7_pincfg pin_cfg[] = { ++ PINCFG_PIN(AC14), PINCFG_PIN(AE15), ++ PINCFG_PIN(AD14), PINCFG_PIN(AE14), ++ PINCFG_PIN(AF14), PINCFG_PIN(AB13), ++ PINCFG_PIN(AB14), PINCFG_PIN(AF15), ++ PINCFG_PIN(AF13), PINCFG_PIN(AC13), ++ PINCFG_PIN(AD13), PINCFG_PIN(AE13), ++ PINCFG_PIN(PORTA_U3), PINCFG_PIN(PORTA_U2), ++ PINCFG_PIN(PORTB_U3), PINCFG_PIN(PORTB_U2), ++ PINCFG_PIN(PORTA_U3_XHCI), PINCFG_PIN(PORTA_U2_XHCI), ++ PINCFG_PIN(PORTB_U3_XHCI), PINCFG_PIN(PORTB_U2_XHCI), ++ PINCFG_PIN(PORTA_MODE), PINCFG_PIN(PORTB_MODE), ++ PINCFG_PIN(PORTA_U3_PHY), PINCFG_PIN(PORTA_U2_PHY), ++ PINCFG_PIN(PORTB_U3_PHY), PINCFG_PIN(PORTB_U2_PHY), ++ PINCFG_PIN(JTAG_PORT), PINCFG_PIN(PCIERC0_PERST), ++ PINCFG_PIN(PCIERC1_PERST), ++}; ++ ++static const struct pinctrl_ops aspeed_g7_soc0_pinctrl_ops = { ++ .get_groups_count = aspeed_pinctrl_get_groups_count, ++ .get_group_name = aspeed_pinctrl_get_group_name, ++ .get_group_pins = aspeed_pinctrl_get_group_pins, ++ .pin_dbg_show = aspeed_pinctrl_pin_dbg_show, ++ .dt_node_to_map = pinconf_generic_dt_node_to_map_all, ++ .dt_free_map = pinctrl_utils_free_map, ++}; ++ ++static const struct pinmux_ops aspeed_g7_soc0_pinmux_ops = { ++ .get_functions_count = aspeed_pinmux_get_fn_count, ++ .get_function_name = aspeed_pinmux_get_fn_name, ++ .get_function_groups = aspeed_pinmux_get_fn_groups, ++ .set_mux = aspeed_g7_pinmux_set_mux, ++ .gpio_request_enable = aspeed_g7_gpio_request_enable, ++ .strict = true, ++}; ++ ++static const struct pinconf_ops aspeed_g7_soc0_pinconf_ops = { ++ .is_generic = true, ++ .pin_config_get = aspeed_pin_config_get, ++ .pin_config_set = aspeed_pin_config_set, ++ .pin_config_group_get = aspeed_pin_config_group_get, ++ .pin_config_group_set = 
aspeed_pin_config_group_set, ++}; ++ ++/* pinctrl_desc */ ++static struct pinctrl_desc aspeed_g7_soc0_pinctrl_desc = { ++ .name = "aspeed-g7-soc0-pinctrl", ++ .pins = aspeed_g7_soc0_pins, ++ .npins = ARRAY_SIZE(aspeed_g7_soc0_pins), ++ .pctlops = &aspeed_g7_soc0_pinctrl_ops, ++ .pmxops = &aspeed_g7_soc0_pinmux_ops, ++ .confops = &aspeed_g7_soc0_pinconf_ops, ++ .owner = THIS_MODULE, ++}; ++ ++static struct aspeed_pin_config aspeed_g7_configs[] = { ++ /* GPIO18A */ ++ { PIN_CONFIG_DRIVE_STRENGTH, { AC14, AC14 }, SCU480, GENMASK(3, 0) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { AE15, AE15 }, SCU484, GENMASK(3, 0) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { AD14, AD14 }, SCU488, GENMASK(3, 0) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { AE14, AE14 }, SCU48C, GENMASK(3, 0) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { AF14, AF14 }, SCU490, GENMASK(3, 0) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { AB13, AB13 }, SCU494, GENMASK(3, 0) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { AB14, AB14 }, SCU498, GENMASK(3, 0) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { AF15, AF15 }, SCU49C, GENMASK(3, 0) }, ++ /* GPIO18B */ ++ { PIN_CONFIG_DRIVE_STRENGTH, { AF13, AF13 }, SCU4A0, GENMASK(3, 0) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { AC13, AC13 }, SCU4A4, GENMASK(3, 0) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { AD13, AD13 }, SCU4A8, GENMASK(3, 0) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { AE13, AE13 }, SCU4AC, GENMASK(3, 0) }, ++}; ++ ++static const struct aspeed_pin_config_map aspeed_g7_pin_config_map[] = { ++ { PIN_CONFIG_DRIVE_STRENGTH, 0, 0, GENMASK(3, 0) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, 1, 1, GENMASK(3, 0) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, 2, 2, GENMASK(3, 0) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, 3, 3, GENMASK(3, 0) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, 4, 4, GENMASK(3, 0) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, 5, 5, GENMASK(3, 0) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, 6, 6, GENMASK(3, 0) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, 7, 7, GENMASK(3, 0) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, 8, 8, GENMASK(3, 0) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, 9, 9, GENMASK(3, 0) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, 10, 10, GENMASK(3, 0) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, 11, 11, GENMASK(3, 0) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, 12, 12, GENMASK(3, 0) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, 13, 13, GENMASK(3, 0) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, 14, 14, GENMASK(3, 0) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, 15, 15, GENMASK(3, 0) }, ++ ++}; ++ ++static struct aspeed_pinctrl_data aspeed_g7_pinctrl_data = { ++ .pins = aspeed_g7_soc0_pins, ++ .npins = ARRAY_SIZE(aspeed_g7_soc0_pins), ++ .pinmux = { ++ .groups = aspeed_g7_soc0_pingroups, ++ .ngroups = ARRAY_SIZE(aspeed_g7_soc0_pingroups), ++ .functions = aspeed_g7_soc0_funcs, ++ .nfunctions = ARRAY_SIZE(aspeed_g7_soc0_funcs), ++ .configs_g7 = pin_cfg, ++ .nconfigs_g7 = ARRAY_SIZE(pin_cfg), ++ }, ++ .configs = aspeed_g7_configs, ++ .nconfigs = ARRAY_SIZE(aspeed_g7_configs), ++ .confmaps = aspeed_g7_pin_config_map, ++ .nconfmaps = ARRAY_SIZE(aspeed_g7_pin_config_map), ++}; ++ ++static int aspeed_g7_soc0_pinctrl_probe(struct platform_device *pdev) ++{ ++ return aspeed_pinctrl_probe(pdev, &aspeed_g7_soc0_pinctrl_desc, ++ &aspeed_g7_pinctrl_data); ++} ++ ++static const struct of_device_id aspeed_g7_soc0_pinctrl_match[] = { ++ { .compatible = "aspeed,ast2700-soc0-pinctrl" }, ++ {} ++}; ++MODULE_DEVICE_TABLE(of, aspeed_g7_soc0_pinctrl_match); ++ ++static struct platform_driver aspeed_g7_soc0_pinctrl_driver = { ++ .probe = aspeed_g7_soc0_pinctrl_probe, ++ .driver = { ++ .name = "aspeed-g7-soc0-pinctrl", ++ .of_match_table = aspeed_g7_soc0_pinctrl_match, ++ .suppress_bind_attrs = true, ++ 
}, ++}; ++ ++static int __init aspeed_g7_soc0_pinctrl_register(void) ++{ ++ return platform_driver_register(&aspeed_g7_soc0_pinctrl_driver); ++} ++arch_initcall(aspeed_g7_soc0_pinctrl_register); +diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g7-soc1.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g7-soc1.c +--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g7-soc1.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g7-soc1.c 2025-12-23 10:16:21.136032468 +0000 +@@ -0,0 +1,2533 @@ ++// SPDX-License-Identifier: GPL-2.0 ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "pinctrl-aspeed.h" ++ ++#define SCU3B0 0x3B0 /* USB Controller Register */ ++#define SCU3B4 0x3B4 /* USB Controller Lock Register */ ++#define SCU3B8 0x3B8 /* USB Controller Secure Register #1 */ ++#define SCU3BC 0x3BC /* USB Controller Secure Register #2 */ ++#define SCU3C0 0x3C0 /* USB Controller Secure Register #3 */ ++#define SCU400 0x400 /* Multi-function Pin Control #1 */ ++#define SCU404 0x404 /* Multi-function Pin Control #2 */ ++#define SCU408 0x408 /* Multi-function Pin Control #3 */ ++#define SCU40C 0x40C /* Multi-function Pin Control #4 */ ++#define SCU410 0x410 /* Multi-function Pin Control #5 */ ++#define SCU414 0x414 /* Multi-function Pin Control #6 */ ++#define SCU418 0x418 /* Multi-function Pin Control #7 */ ++#define SCU41C 0x41C /* Multi-function Pin Control #8 */ ++#define SCU420 0x420 /* Multi-function Pin Control #9 */ ++#define SCU424 0x424 /* Multi-function Pin Control #10 */ ++#define SCU428 0x428 /* Multi-function Pin Control #11 */ ++#define SCU42C 0x42C /* Multi-function Pin Control #12 */ ++#define SCU430 0x430 /* Multi-function Pin Control #13 */ ++#define SCU434 0x434 /* Multi-function Pin Control #14 */ ++#define SCU438 0x438 /* Multi-function Pin Control #15 */ ++#define SCU43C 0x43C /* Multi-function Pin Control #16 */ ++#define SCU440 0x440 /* Multi-function Pin Control #17 */ ++#define SCU444 0x444 /* Multi-function Pin Control #18 */ ++#define SCU448 0x448 /* Multi-function Pin Control #19 */ ++#define SCU44C 0x44C /* Multi-function Pin Control #20 */ ++#define SCU450 0x450 /* Multi-function Pin Control #21 */ ++#define SCU454 0x454 /* Multi-function Pin Control #22 */ ++#define SCU458 0x458 /* Multi-function Pin Control #23 */ ++#define SCU45C 0x45C /* Multi-function Pin Control #24 */ ++#define SCU460 0x460 /* Multi-function Pin Control #25 */ ++#define SCU464 0x464 /* Multi-function Pin Control #26 */ ++#define SCU468 0x468 /* Multi-function Pin Control #27 */ ++#define SCU46C 0x46C /* Multi-function Pin Control #28 */ ++#define SCU470 0x470 /* Multi-function Pin Control #29 */ ++#define SCU474 0x474 /* Multi-function Pin Control #30 */ ++#define SCU478 0x478 /* Multi-function Pin Control #31 */ ++#define SCU47C 0x47C ++#define SCU480 0x480 /* Disable Pull-Down Control #1 */ ++#define SCU484 0x484 /* Disable Pull-Down Control #2 */ ++#define SCU488 0x488 /* Disable Pull-Down Control #3 */ ++#define SCU48C 0x48C /* Disable Pull-Down Control #4 */ ++#define SCU490 0x490 /* Disable Pull-Down Control #5 */ ++#define SCU494 0x494 /* Disable Pull-Down Control #6 */ ++#define SCU498 0x498 /* Disable Pull-Down Control #7 */ ++#define SCU4A0 0x4A0 /* Voltage Selection */ ++#define SCU4C0 0x4C0 /* Driving Strength #0 A-I */ ++#define SCU4C4 0x4C4 /* Driving Strength #1 J-K */ ++#define SCU4C8 0x4C8 /* Driving Strength #2 L-M */ ++#define SCU4CC 
0x4CC /* Driving Strength #3 N-O */ ++#define SCU4D0 0x4D0 /* Driving Strength #4 P-Q */ ++#define SCU4D4 0x4D4 /* Driving Strength #5 R-S */ ++#define SCU4D8 0x4D8 /* Driving Strength #6 T-U */ ++#define SCU4DC 0x4DC /* Driving Strength #7 W */ ++ ++#define SCU908 0x908 /* PCIe RC PERST Pin Control */ ++ ++enum { ++ C16, ++ C14, ++ C11, ++ D9, ++ F14, ++ D10, ++ C12, ++ C13, ++ AC26, ++ AA25, ++ AB23, ++ U22, ++ V21, ++ N26, ++ P25, ++ N25, ++ V23, ++ W22, ++ AB26, ++ AD26, ++ P26, ++ AE26, ++ AF26, ++ AF25, ++ AE25, ++ AD25, ++ AF23, ++ AF20, ++ AF21, ++ AE21, ++ AE23, ++ AD22, ++ AF17, ++ AA16, ++ Y16, ++ V17, ++ J13, ++ AB16, ++ AC16, ++ AF16, ++ AA15, ++ AB15, ++ AC15, ++ AD15, ++ Y15, ++ AA14, ++ W16, ++ V16, ++ AB18, ++ AC18, ++ K13, ++ AA17, ++ AB17, ++ AD16, ++ AC17, ++ AD17, ++ AE16, ++ AE17, ++ AB24, ++ W26, ++ HOLE0, ++ HOLE1, ++ HOLE2, ++ HOLE3, ++ W25, ++ Y23, ++ Y24, ++ W21, ++ AA23, ++ AC22, ++ AB22, ++ Y21, ++ AE20, ++ AF19, ++ Y22, ++ AA20, ++ AA22, ++ AB20, ++ AF18, ++ AE19, ++ AD20, ++ AC20, ++ AA21, ++ AB21, ++ AC19, ++ AE18, ++ AD19, ++ AD18, ++ U25, ++ U26, ++ Y26, ++ AA24, ++ R25, ++ AA26, ++ R26, ++ Y25, ++ B16, ++ D14, ++ B15, ++ B14, ++ C17, ++ B13, ++ E14, ++ C15, ++ D24, ++ B23, ++ B22, ++ C23, ++ B18, ++ B21, ++ M15, ++ B19, ++ B26, ++ A25, ++ A24, ++ B24, ++ E26, ++ A21, ++ A19, ++ A18, ++ D26, ++ C26, ++ A23, ++ A22, ++ B25, ++ F26, ++ A26, ++ A14, ++ E10, ++ E13, ++ D12, ++ F10, ++ E11, ++ F11, ++ F13, ++ N15, ++ C20, ++ C19, ++ A8, ++ R14, ++ A7, ++ P14, ++ D20, ++ A6, ++ B6, ++ N14, ++ B7, ++ B8, ++ B9, ++ M14, ++ J11, ++ E7, ++ D19, ++ B11, ++ D15, ++ B12, ++ B10, ++ P13, ++ C18, ++ C6, ++ C7, ++ D7, ++ N13, ++ C8, ++ C9, ++ C10, ++ M16, ++ A15, ++ G11, ++ H7, ++ H8, ++ H9, ++ H10, ++ H11, ++ J9, ++ J10, ++ E9, ++ F9, ++ F8, ++ M13, ++ F7, ++ D8, ++ E8, ++ L12, ++ F12, ++ E12, ++ J12, ++ G7, ++ G8, ++ G9, ++ G10, ++ K12, ++ W17, ++ V18, ++ W18, ++ Y17, ++ AA18, ++ AA13, ++ Y18, ++ AA12, ++ W20, ++ V20, ++ Y11, ++ V14, ++ V19, ++ W14, ++ Y20, ++ AB19, ++ U21, ++ T24, ++ V24, ++ V22, ++ T23, ++ AC25, ++ AB25, ++ AC24, ++ SGMII0, ++ PCIERC2_PERST, ++ PORTC_MODE, // SCU3B0[1:0] ++ PORTD_MODE, // SCU3B0[3:2] ++}; ++ ++GROUP_DECL(ESPI0, B16, D14, B15, B14, C17, B13, E14, C15); ++GROUP_DECL(ESPI1, C16, C14, C11, D9, F14, D10, C12, C13); ++GROUP_DECL(LPC0, AF26, AF25, B16, D14, B15, B14, C17, B13, E14, C15); ++GROUP_DECL(LPC1, C16, C14, C11, D9, F14, D10, C12, C13, AE16, AE17); ++GROUP_DECL(SD, C16, C14, C11, D9, F14, D10, C12, C13); ++GROUP_DECL(VPI, C16, C14, C11, D9, F14, D10, C12, C13, AC26, AA25, AB23, U22, ++ V21, N26, P25, N25, V23, W22, AB26, AD26, P26, AE26, AF26, AF25, ++ AE25, AD25, AF23, AF20, AF21, AE21); ++GROUP_DECL(OSCCLK, C17); ++GROUP_DECL(TACH0, AC26); ++GROUP_DECL(TACH1, AA25); ++GROUP_DECL(TACH2, AB23); ++GROUP_DECL(TACH3, U22); ++GROUP_DECL(THRU0, AC26, AA25); ++GROUP_DECL(THRU1, AB23, U22); ++GROUP_DECL(TACH4, V21); ++GROUP_DECL(TACH5, N26); ++GROUP_DECL(TACH6, P25); ++GROUP_DECL(TACH7, N25); ++GROUP_DECL(NTCS5, V21); ++GROUP_DECL(NDCD5, N26); ++GROUP_DECL(NDSR5, P25); ++GROUP_DECL(NRI5, N25); ++GROUP_DECL(SALT12, AB26); ++GROUP_DECL(SALT13, AD26); ++GROUP_DECL(SALT14, P26); ++GROUP_DECL(SALT15, AE26); ++GROUP_DECL(NDTR5, V23); ++GROUP_DECL(NRTS5, W22); ++GROUP_DECL(NCTS6, AB26); ++GROUP_DECL(NDCD6, AD26); ++GROUP_DECL(NDSR6, P26); ++GROUP_DECL(NRI6, AE26); ++GROUP_DECL(NDTR6, AF26); ++GROUP_DECL(NRTS6, AF25); ++GROUP_DECL(TACH8, V23); ++GROUP_DECL(TACH9, W22); ++GROUP_DECL(TACH10, AB26); ++GROUP_DECL(TACH11, AD26); ++GROUP_DECL(TACH12, 
P26); ++GROUP_DECL(TACH13, AE26); ++GROUP_DECL(TACH14, AF26); ++GROUP_DECL(TACH15, AF25); ++GROUP_DECL(SPIM0, AE25, AD25, AF23, AF20, AF21, AE21, AE23); ++GROUP_DECL(PWM0, AE25); ++GROUP_DECL(PWM1, AD25); ++GROUP_DECL(PWM2, AF23); ++GROUP_DECL(PWM3, AF20); ++GROUP_DECL(PWM4, AF21); ++GROUP_DECL(PWM5, AE21); ++GROUP_DECL(PWM6, AE23); ++GROUP_DECL(PWM7, AD22); ++GROUP_DECL(SIOPBON0, AE25); ++GROUP_DECL(SIOPBIN0, AD25); ++GROUP_DECL(SIOSCIN0, AF23); ++GROUP_DECL(SIOS3N0, AF20); ++GROUP_DECL(SIOS5N0, AF21); ++GROUP_DECL(SIOPWREQN0, AE21); ++GROUP_DECL(SIOONCTRLN0, AE23); ++GROUP_DECL(SIOPWRGD0, AD22); ++GROUP_DECL(NCTS0, AF17); ++GROUP_DECL(NDCD0, AA16); ++GROUP_DECL(NDSR0, Y16); ++GROUP_DECL(NRI0, V17); ++GROUP_DECL(NDTR0, J13); ++GROUP_DECL(NRTS0, AB16); ++GROUP_DECL(TXD0, AC16); ++GROUP_DECL(RXD0, AF16); ++GROUP_DECL(NCTS1, AA15); ++GROUP_DECL(NDCD1, AB15); ++GROUP_DECL(NDSR1, AC15); ++GROUP_DECL(NRI1, AD15); ++GROUP_DECL(NDTR1, Y15); ++GROUP_DECL(NRTS1, AA14); ++GROUP_DECL(TXD1, W16); ++GROUP_DECL(RXD1, V16); ++GROUP_DECL(TXD2, AB18); ++GROUP_DECL(RXD2, AC18); ++GROUP_DECL(TXD3, K13); ++GROUP_DECL(RXD3, AA17); ++GROUP_DECL(NCTS5, V21); ++GROUP_DECL(TXD5, AB17); ++GROUP_DECL(RXD5, AD16); ++GROUP_DECL(TXD6, AC17); ++GROUP_DECL(RXD6, AD17); ++GROUP_DECL(TXD7, AE16); ++GROUP_DECL(RXD7, AE17); ++GROUP_DECL(TXD8, M15); ++GROUP_DECL(RXD8, B19); ++GROUP_DECL(TXD9, B26); ++GROUP_DECL(RXD9, A25); ++GROUP_DECL(TXD10, A24); ++GROUP_DECL(RXD10, B24); ++GROUP_DECL(TXD11, E26); ++GROUP_DECL(RXD11, A21); ++GROUP_DECL(SPIM1, K13, AA17, AB17, AD16, AC17, AD17, AE16, AE17); ++GROUP_DECL(WDTRST0N, K13); ++GROUP_DECL(WDTRST1N, AA17); ++GROUP_DECL(WDTRST2N, AB17); ++GROUP_DECL(WDTRST3N, AD16); ++GROUP_DECL(WDTRST4N, AC25); ++GROUP_DECL(WDTRST5N, AB25); ++GROUP_DECL(WDTRST6N, AC24); ++GROUP_DECL(WDTRST7N, AB24); ++GROUP_DECL(PWM8, K13); ++GROUP_DECL(PWM9, AA17); ++GROUP_DECL(PWM10, AB17); ++GROUP_DECL(PWM11, AD16); ++GROUP_DECL(PWM12, AC17); ++GROUP_DECL(PWM13, AD17); ++GROUP_DECL(PWM14, AE16); ++GROUP_DECL(PWM15, AE17); ++GROUP_DECL(SALT0, AC17); ++GROUP_DECL(SALT1, AD17); ++GROUP_DECL(SALT2, AC15); ++GROUP_DECL(SALT3, AD15); ++GROUP_DECL(FSI0, AD20, AC20); ++GROUP_DECL(FSI1, AA21, AB21); ++GROUP_DECL(FSI2, AC19, AE18); ++GROUP_DECL(FSI3, AD19, AD18); ++GROUP_DECL(SPIM2, W25, Y23, Y24, W21, AA23, AC22, AB22, Y21); ++GROUP_DECL(SALT4, W17); ++GROUP_DECL(SALT5, V18); ++GROUP_DECL(SALT6, W18); ++GROUP_DECL(SALT7, Y17); ++GROUP_DECL(SALT8, AA18); ++GROUP_DECL(SALT9, AA13); ++GROUP_DECL(SALT10, Y18); ++GROUP_DECL(SALT11, AA12); ++GROUP_DECL(ADC0, W17); ++GROUP_DECL(ADC1, V18); ++GROUP_DECL(ADC2, W18); ++GROUP_DECL(ADC3, Y17); ++GROUP_DECL(ADC4, AA18); ++GROUP_DECL(ADC5, AA13); ++GROUP_DECL(ADC6, Y18); ++GROUP_DECL(ADC7, AA12); ++GROUP_DECL(ADC8, W20); ++GROUP_DECL(ADC9, V20); ++GROUP_DECL(ADC10, Y11); ++GROUP_DECL(ADC11, V14); ++GROUP_DECL(ADC12, V19); ++GROUP_DECL(ADC13, W14); ++GROUP_DECL(ADC14, Y20); ++GROUP_DECL(ADC15, AB19); ++GROUP_DECL(AUXPWRGOOD0, W14); ++GROUP_DECL(AUXPWRGOOD1, Y20); ++GROUP_DECL(SGPM0, U21, T24, V22, T23); ++GROUP_DECL(SGPM1, AC25, AB25, AB24, W26); ++GROUP_DECL(I2C0, G11, H7); ++GROUP_DECL(I2C1, H8, H9); ++GROUP_DECL(I2C2, H10, H11); ++GROUP_DECL(I2C3, J9, J10); ++GROUP_DECL(I2C4, E9, F9); ++GROUP_DECL(I2C5, F8, M13); ++GROUP_DECL(I2C6, F7, D8); ++GROUP_DECL(I2C7, E8, L12); ++GROUP_DECL(I2C8, F12, E12); ++GROUP_DECL(I2C9, J12, G7); ++GROUP_DECL(I2C10, G8, G9); ++GROUP_DECL(I2C11, G10, K12); ++GROUP_DECL(I2C12, AC18, AA17); ++GROUP_DECL(I2C13, AB17, AD16); ++GROUP_DECL(I2C14, AC17, AD17); 
++GROUP_DECL(I2C15, AE16, AE17); ++GROUP_DECL(DI2C0, C16, D9); ++GROUP_DECL(DI2C1, C14, F14); ++GROUP_DECL(DI2C2, D10, C12); ++GROUP_DECL(DI2C3, C11, C13); ++GROUP_DECL(DI2C8, U25, U26); ++GROUP_DECL(DI2C9, Y26, AA24); ++GROUP_DECL(DI2C10, R25, AA26); ++GROUP_DECL(DI2C11, R26, Y25); ++GROUP_DECL(DI2C12, W25, Y23); ++GROUP_DECL(DI2C13, Y24, W21); ++GROUP_DECL(DI2C14, AA23, AC22); ++GROUP_DECL(DI2C15, AB22, Y21); ++GROUP_DECL(LTPI_PS_I2C0, G11, H7); ++GROUP_DECL(LTPI_PS_I2C1, H8, H9); ++GROUP_DECL(LTPI_PS_I2C2, H10, H11); ++GROUP_DECL(LTPI_PS_I2C3, J9, J10); ++GROUP_DECL(SIOPBON1, AF17); ++GROUP_DECL(SIOPBIN1, AA16); ++GROUP_DECL(SIOSCIN1, Y16); ++GROUP_DECL(SIOS3N1, V17); ++GROUP_DECL(SIOS5N1, J13); ++GROUP_DECL(SIOPWREQN1, AB16); ++GROUP_DECL(SIOONCTRLN1, AA15); ++GROUP_DECL(SIOPWRGD1, AB15); ++GROUP_DECL(HVI3C12, W25, Y23); ++GROUP_DECL(HVI3C13, Y24, W21); ++GROUP_DECL(HVI3C14, AA23, AC22); ++GROUP_DECL(HVI3C15, AB22, Y21); ++GROUP_DECL(I3C4, AE20, AF19); ++GROUP_DECL(I3C5, Y22, AA20); ++GROUP_DECL(I3C6, AA22, AB20); ++GROUP_DECL(I3C7, AF18, AE19); ++GROUP_DECL(I3C8, AD20, AC20); ++GROUP_DECL(I3C9, AA21, AB21); ++GROUP_DECL(I3C10, AC19, AE18); ++GROUP_DECL(I3C11, AD19, AD18); ++GROUP_DECL(HVI3C0, U25, U26); ++GROUP_DECL(HVI3C1, Y26, AA24); ++GROUP_DECL(HVI3C2, R25, AA26); ++GROUP_DECL(HVI3C3, R26, Y25); ++GROUP_DECL(LTPI, U25, U26, Y26, AA24); ++GROUP_DECL(SPI0, D24, B23, B22); ++GROUP_DECL(QSPI0, C23, B18); ++GROUP_DECL(SPI0CS1, B21); ++GROUP_DECL(SPI0ABR, M15); ++GROUP_DECL(SPI0WPN, B19); ++GROUP_DECL(SPI1, B26, A25, A24); ++GROUP_DECL(QSPI1, B24, E26); ++GROUP_DECL(SPI1CS1, A21); ++GROUP_DECL(SPI1ABR, A19); ++GROUP_DECL(SPI1WPN, A18); ++GROUP_DECL(SPI2, D26, C26, A23, A22); ++GROUP_DECL(QSPI2, B25, F26); ++GROUP_DECL(SPI2CS1, A26); ++GROUP_DECL(THRU2, A19, A18); ++GROUP_DECL(THRU3, B25, F26); ++GROUP_DECL(JTAGM1, D12, F10, E11, F11, F13); ++GROUP_DECL(MDIO0, B9, M14); ++GROUP_DECL(MDIO1, C9, C10); ++GROUP_DECL(MDIO2, E10, E13); ++GROUP_DECL(FWQSPI, M16, A15); ++GROUP_DECL(FWSPIABR, A14); ++GROUP_DECL(FWSPIWPN, N15); ++GROUP_DECL(RGMII0, C20, C19, A8, R14, A7, P14, D20, A6, B6, N14, B7, B8); ++GROUP_DECL(RGMII1, D19, B11, D15, B12, B10, P13, C18, C6, C7, D7, N13, C8); ++GROUP_DECL(RMII0, C20, A8, R14, A7, P14, A6, B6, N14); ++GROUP_DECL(RMII0RCLKO, D20); ++GROUP_DECL(RMII1, D19, D15, B12, B10, P13, C6, C7, D7); ++GROUP_DECL(RMII1RCLKO, C18); ++GROUP_DECL(VGA, J11, E7); ++GROUP_DECL(DSGPM0, D19, B10, C7, D7); ++GROUP_DECL(SGPS, B11, C18, N13, C8); ++GROUP_DECL(I2CF0, F12, E12, J12, G7); ++GROUP_DECL(I2CF1, E9, F9, F8, M13); ++GROUP_DECL(I2CF2, F7, D8, E8, L12); ++GROUP_DECL(CANBUS, G7, G8, G9); ++GROUP_DECL(USBUART, G10, K12); ++GROUP_DECL(HBLED, V24); ++GROUP_DECL(MACLINK0, U21); ++GROUP_DECL(MACLINK1, AC24); ++GROUP_DECL(MACLINK2, T24); ++GROUP_DECL(NCTS2, U21); ++GROUP_DECL(NDCD2, T24); ++GROUP_DECL(NDSR2, V22); ++GROUP_DECL(NRI2, T23); ++GROUP_DECL(NDTR2, AC25); ++GROUP_DECL(NRTS2, AB25); ++GROUP_DECL(SMON0, U21, T24, V22, T23); ++GROUP_DECL(SMON1, AB24, W26, AC25, AB25); ++GROUP_DECL(SGMII, SGMII0); ++//PCIE RC PERST ++GROUP_DECL(PE2SGRSTN, PCIERC2_PERST, E10); ++GROUP_DECL(USB2CUD, PORTC_MODE); ++GROUP_DECL(USB2CD, PORTC_MODE); ++GROUP_DECL(USB2CH, PORTC_MODE); ++GROUP_DECL(USB2CU, PORTC_MODE); ++GROUP_DECL(USB2DD, PORTD_MODE); ++GROUP_DECL(USB2DH, PORTD_MODE); ++ ++static struct aspeed_pin_group aspeed_g7_soc1_pingroups[] = { ++ ASPEED_PINCTRL_GROUP(ESPI0), ++ ASPEED_PINCTRL_GROUP(ESPI1), ++ ASPEED_PINCTRL_GROUP(LPC0), ++ ASPEED_PINCTRL_GROUP(LPC1), ++ ASPEED_PINCTRL_GROUP(SD), ++ 
ASPEED_PINCTRL_GROUP(VPI), ++ ASPEED_PINCTRL_GROUP(OSCCLK), ++ ASPEED_PINCTRL_GROUP(TACH0), ++ ASPEED_PINCTRL_GROUP(TACH1), ++ ASPEED_PINCTRL_GROUP(TACH2), ++ ASPEED_PINCTRL_GROUP(TACH3), ++ ASPEED_PINCTRL_GROUP(THRU0), ++ ASPEED_PINCTRL_GROUP(THRU1), ++ ASPEED_PINCTRL_GROUP(TACH4), ++ ASPEED_PINCTRL_GROUP(TACH5), ++ ASPEED_PINCTRL_GROUP(TACH6), ++ ASPEED_PINCTRL_GROUP(TACH7), ++ ASPEED_PINCTRL_GROUP(NTCS5), ++ ASPEED_PINCTRL_GROUP(NDCD5), ++ ASPEED_PINCTRL_GROUP(NDSR5), ++ ASPEED_PINCTRL_GROUP(NRI5), ++ ASPEED_PINCTRL_GROUP(SALT12), ++ ASPEED_PINCTRL_GROUP(SALT13), ++ ASPEED_PINCTRL_GROUP(SALT14), ++ ASPEED_PINCTRL_GROUP(SALT15), ++ ASPEED_PINCTRL_GROUP(NDTR5), ++ ASPEED_PINCTRL_GROUP(NRTS5), ++ ASPEED_PINCTRL_GROUP(NCTS6), ++ ASPEED_PINCTRL_GROUP(NDCD6), ++ ASPEED_PINCTRL_GROUP(NDSR6), ++ ASPEED_PINCTRL_GROUP(NRI6), ++ ASPEED_PINCTRL_GROUP(NDTR6), ++ ASPEED_PINCTRL_GROUP(NRTS6), ++ ASPEED_PINCTRL_GROUP(TACH8), ++ ASPEED_PINCTRL_GROUP(TACH9), ++ ASPEED_PINCTRL_GROUP(TACH10), ++ ASPEED_PINCTRL_GROUP(TACH11), ++ ASPEED_PINCTRL_GROUP(TACH12), ++ ASPEED_PINCTRL_GROUP(TACH13), ++ ASPEED_PINCTRL_GROUP(TACH14), ++ ASPEED_PINCTRL_GROUP(TACH15), ++ ASPEED_PINCTRL_GROUP(SPIM0), ++ ASPEED_PINCTRL_GROUP(PWM0), ++ ASPEED_PINCTRL_GROUP(PWM1), ++ ASPEED_PINCTRL_GROUP(PWM2), ++ ASPEED_PINCTRL_GROUP(PWM3), ++ ASPEED_PINCTRL_GROUP(PWM4), ++ ASPEED_PINCTRL_GROUP(PWM5), ++ ASPEED_PINCTRL_GROUP(PWM6), ++ ASPEED_PINCTRL_GROUP(PWM7), ++ ASPEED_PINCTRL_GROUP(SIOPBON0), ++ ASPEED_PINCTRL_GROUP(SIOPBIN0), ++ ASPEED_PINCTRL_GROUP(SIOSCIN0), ++ ASPEED_PINCTRL_GROUP(SIOS3N0), ++ ASPEED_PINCTRL_GROUP(SIOS5N0), ++ ASPEED_PINCTRL_GROUP(SIOPWREQN0), ++ ASPEED_PINCTRL_GROUP(SIOONCTRLN0), ++ ASPEED_PINCTRL_GROUP(SIOPWRGD0), ++ ASPEED_PINCTRL_GROUP(NCTS0), ++ ASPEED_PINCTRL_GROUP(NDCD0), ++ ASPEED_PINCTRL_GROUP(NDSR0), ++ ASPEED_PINCTRL_GROUP(NRI0), ++ ASPEED_PINCTRL_GROUP(NDTR0), ++ ASPEED_PINCTRL_GROUP(NRTS0), ++ ASPEED_PINCTRL_GROUP(TXD0), ++ ASPEED_PINCTRL_GROUP(RXD0), ++ ASPEED_PINCTRL_GROUP(NCTS1), ++ ASPEED_PINCTRL_GROUP(NDCD1), ++ ASPEED_PINCTRL_GROUP(NDSR1), ++ ASPEED_PINCTRL_GROUP(NRI1), ++ ASPEED_PINCTRL_GROUP(NDTR1), ++ ASPEED_PINCTRL_GROUP(NRTS1), ++ ASPEED_PINCTRL_GROUP(TXD1), ++ ASPEED_PINCTRL_GROUP(RXD1), ++ ASPEED_PINCTRL_GROUP(TXD2), ++ ASPEED_PINCTRL_GROUP(RXD2), ++ ASPEED_PINCTRL_GROUP(TXD3), ++ ASPEED_PINCTRL_GROUP(RXD3), ++ ASPEED_PINCTRL_GROUP(NCTS5), ++ ASPEED_PINCTRL_GROUP(NDCD5), ++ ASPEED_PINCTRL_GROUP(NDSR5), ++ ASPEED_PINCTRL_GROUP(NRI5), ++ ASPEED_PINCTRL_GROUP(NDTR5), ++ ASPEED_PINCTRL_GROUP(NRTS5), ++ ASPEED_PINCTRL_GROUP(TXD5), ++ ASPEED_PINCTRL_GROUP(RXD5), ++ ASPEED_PINCTRL_GROUP(NCTS6), ++ ASPEED_PINCTRL_GROUP(NDCD6), ++ ASPEED_PINCTRL_GROUP(NDSR6), ++ ASPEED_PINCTRL_GROUP(NRI6), ++ ASPEED_PINCTRL_GROUP(NDTR6), ++ ASPEED_PINCTRL_GROUP(NRTS6), ++ ASPEED_PINCTRL_GROUP(TXD6), ++ ASPEED_PINCTRL_GROUP(RXD6), ++ ASPEED_PINCTRL_GROUP(TXD6), ++ ASPEED_PINCTRL_GROUP(RXD6), ++ ASPEED_PINCTRL_GROUP(TXD7), ++ ASPEED_PINCTRL_GROUP(RXD7), ++ ASPEED_PINCTRL_GROUP(TXD8), ++ ASPEED_PINCTRL_GROUP(RXD8), ++ ASPEED_PINCTRL_GROUP(TXD9), ++ ASPEED_PINCTRL_GROUP(RXD9), ++ ASPEED_PINCTRL_GROUP(TXD10), ++ ASPEED_PINCTRL_GROUP(RXD10), ++ ASPEED_PINCTRL_GROUP(TXD11), ++ ASPEED_PINCTRL_GROUP(RXD11), ++ ASPEED_PINCTRL_GROUP(SPIM1), ++ ASPEED_PINCTRL_GROUP(WDTRST0N), ++ ASPEED_PINCTRL_GROUP(WDTRST1N), ++ ASPEED_PINCTRL_GROUP(WDTRST2N), ++ ASPEED_PINCTRL_GROUP(WDTRST3N), ++ ASPEED_PINCTRL_GROUP(WDTRST4N), ++ ASPEED_PINCTRL_GROUP(WDTRST5N), ++ ASPEED_PINCTRL_GROUP(WDTRST6N), ++ ASPEED_PINCTRL_GROUP(WDTRST7N), ++ 
ASPEED_PINCTRL_GROUP(PWM8), ++ ASPEED_PINCTRL_GROUP(PWM9), ++ ASPEED_PINCTRL_GROUP(PWM10), ++ ASPEED_PINCTRL_GROUP(PWM11), ++ ASPEED_PINCTRL_GROUP(PWM12), ++ ASPEED_PINCTRL_GROUP(PWM13), ++ ASPEED_PINCTRL_GROUP(PWM14), ++ ASPEED_PINCTRL_GROUP(PWM15), ++ ASPEED_PINCTRL_GROUP(SALT0), ++ ASPEED_PINCTRL_GROUP(SALT1), ++ ASPEED_PINCTRL_GROUP(SALT2), ++ ASPEED_PINCTRL_GROUP(SALT3), ++ ASPEED_PINCTRL_GROUP(FSI0), ++ ASPEED_PINCTRL_GROUP(FSI1), ++ ASPEED_PINCTRL_GROUP(FSI2), ++ ASPEED_PINCTRL_GROUP(FSI3), ++ ASPEED_PINCTRL_GROUP(SPIM2), ++ ASPEED_PINCTRL_GROUP(SALT4), ++ ASPEED_PINCTRL_GROUP(SALT5), ++ ASPEED_PINCTRL_GROUP(SALT6), ++ ASPEED_PINCTRL_GROUP(SALT7), ++ ASPEED_PINCTRL_GROUP(SALT8), ++ ASPEED_PINCTRL_GROUP(SALT9), ++ ASPEED_PINCTRL_GROUP(SALT10), ++ ASPEED_PINCTRL_GROUP(SALT11), ++ ASPEED_PINCTRL_GROUP(ADC0), ++ ASPEED_PINCTRL_GROUP(ADC1), ++ ASPEED_PINCTRL_GROUP(ADC2), ++ ASPEED_PINCTRL_GROUP(ADC3), ++ ASPEED_PINCTRL_GROUP(ADC4), ++ ASPEED_PINCTRL_GROUP(ADC5), ++ ASPEED_PINCTRL_GROUP(ADC6), ++ ASPEED_PINCTRL_GROUP(ADC7), ++ ASPEED_PINCTRL_GROUP(ADC8), ++ ASPEED_PINCTRL_GROUP(ADC9), ++ ASPEED_PINCTRL_GROUP(ADC10), ++ ASPEED_PINCTRL_GROUP(ADC11), ++ ASPEED_PINCTRL_GROUP(ADC12), ++ ASPEED_PINCTRL_GROUP(ADC13), ++ ASPEED_PINCTRL_GROUP(ADC14), ++ ASPEED_PINCTRL_GROUP(ADC15), ++ ASPEED_PINCTRL_GROUP(AUXPWRGOOD0), ++ ASPEED_PINCTRL_GROUP(AUXPWRGOOD1), ++ ASPEED_PINCTRL_GROUP(SGPM0), ++ ASPEED_PINCTRL_GROUP(SGPM1), ++ ASPEED_PINCTRL_GROUP(I2C0), ++ ASPEED_PINCTRL_GROUP(I2C1), ++ ASPEED_PINCTRL_GROUP(I2C2), ++ ASPEED_PINCTRL_GROUP(I2C3), ++ ASPEED_PINCTRL_GROUP(I2C4), ++ ASPEED_PINCTRL_GROUP(I2C5), ++ ASPEED_PINCTRL_GROUP(I2C6), ++ ASPEED_PINCTRL_GROUP(I2C7), ++ ASPEED_PINCTRL_GROUP(I2C8), ++ ASPEED_PINCTRL_GROUP(I2C9), ++ ASPEED_PINCTRL_GROUP(I2C10), ++ ASPEED_PINCTRL_GROUP(I2C11), ++ ASPEED_PINCTRL_GROUP(I2C12), ++ ASPEED_PINCTRL_GROUP(I2C13), ++ ASPEED_PINCTRL_GROUP(I2C14), ++ ASPEED_PINCTRL_GROUP(I2C15), ++ ASPEED_PINCTRL_GROUP(LTPI_PS_I2C0), ++ ASPEED_PINCTRL_GROUP(LTPI_PS_I2C1), ++ ASPEED_PINCTRL_GROUP(LTPI_PS_I2C2), ++ ASPEED_PINCTRL_GROUP(LTPI_PS_I2C3), ++ ASPEED_PINCTRL_GROUP(DI2C0), ++ ASPEED_PINCTRL_GROUP(DI2C1), ++ ASPEED_PINCTRL_GROUP(DI2C2), ++ ASPEED_PINCTRL_GROUP(DI2C3), ++ ASPEED_PINCTRL_GROUP(DI2C8), ++ ASPEED_PINCTRL_GROUP(DI2C9), ++ ASPEED_PINCTRL_GROUP(DI2C10), ++ ASPEED_PINCTRL_GROUP(DI2C11), ++ ASPEED_PINCTRL_GROUP(DI2C12), ++ ASPEED_PINCTRL_GROUP(DI2C13), ++ ASPEED_PINCTRL_GROUP(DI2C14), ++ ASPEED_PINCTRL_GROUP(DI2C15), ++ ASPEED_PINCTRL_GROUP(SIOPBON1), ++ ASPEED_PINCTRL_GROUP(SIOPBIN1), ++ ASPEED_PINCTRL_GROUP(SIOSCIN1), ++ ASPEED_PINCTRL_GROUP(SIOS3N1), ++ ASPEED_PINCTRL_GROUP(SIOS5N1), ++ ASPEED_PINCTRL_GROUP(SIOPWREQN1), ++ ASPEED_PINCTRL_GROUP(SIOONCTRLN1), ++ ASPEED_PINCTRL_GROUP(SIOPWRGD1), ++ ASPEED_PINCTRL_GROUP(HVI3C12), ++ ASPEED_PINCTRL_GROUP(HVI3C13), ++ ASPEED_PINCTRL_GROUP(HVI3C14), ++ ASPEED_PINCTRL_GROUP(HVI3C15), ++ ASPEED_PINCTRL_GROUP(I3C4), ++ ASPEED_PINCTRL_GROUP(I3C5), ++ ASPEED_PINCTRL_GROUP(I3C6), ++ ASPEED_PINCTRL_GROUP(I3C7), ++ ASPEED_PINCTRL_GROUP(I3C8), ++ ASPEED_PINCTRL_GROUP(I3C9), ++ ASPEED_PINCTRL_GROUP(I3C10), ++ ASPEED_PINCTRL_GROUP(I3C11), ++ ASPEED_PINCTRL_GROUP(HVI3C0), ++ ASPEED_PINCTRL_GROUP(HVI3C1), ++ ASPEED_PINCTRL_GROUP(HVI3C2), ++ ASPEED_PINCTRL_GROUP(HVI3C3), ++ ASPEED_PINCTRL_GROUP(LTPI), ++ ASPEED_PINCTRL_GROUP(SPI0), ++ ASPEED_PINCTRL_GROUP(QSPI0), ++ ASPEED_PINCTRL_GROUP(SPI0CS1), ++ ASPEED_PINCTRL_GROUP(SPI0ABR), ++ ASPEED_PINCTRL_GROUP(SPI0WPN), ++ ASPEED_PINCTRL_GROUP(SPI1), ++ ASPEED_PINCTRL_GROUP(QSPI1), ++ 
ASPEED_PINCTRL_GROUP(SPI1CS1), ++ ASPEED_PINCTRL_GROUP(SPI1ABR), ++ ASPEED_PINCTRL_GROUP(SPI1WPN), ++ ASPEED_PINCTRL_GROUP(SPI2), ++ ASPEED_PINCTRL_GROUP(QSPI2), ++ ASPEED_PINCTRL_GROUP(SPI2CS1), ++ ASPEED_PINCTRL_GROUP(THRU2), ++ ASPEED_PINCTRL_GROUP(THRU3), ++ ASPEED_PINCTRL_GROUP(JTAGM1), ++ ASPEED_PINCTRL_GROUP(MDIO0), ++ ASPEED_PINCTRL_GROUP(MDIO1), ++ ASPEED_PINCTRL_GROUP(MDIO2), ++ ASPEED_PINCTRL_GROUP(FWQSPI), ++ ASPEED_PINCTRL_GROUP(FWSPIABR), ++ ASPEED_PINCTRL_GROUP(FWSPIWPN), ++ ASPEED_PINCTRL_GROUP(RGMII0), ++ ASPEED_PINCTRL_GROUP(RGMII1), ++ ASPEED_PINCTRL_GROUP(RMII0), ++ ASPEED_PINCTRL_GROUP(RMII0RCLKO), ++ ASPEED_PINCTRL_GROUP(RMII1), ++ ASPEED_PINCTRL_GROUP(RMII1RCLKO), ++ ASPEED_PINCTRL_GROUP(VGA), ++ ASPEED_PINCTRL_GROUP(DSGPM0), ++ ASPEED_PINCTRL_GROUP(SGPS), ++ ASPEED_PINCTRL_GROUP(I2CF0), ++ ASPEED_PINCTRL_GROUP(I2CF1), ++ ASPEED_PINCTRL_GROUP(I2CF2), ++ ASPEED_PINCTRL_GROUP(CANBUS), ++ ASPEED_PINCTRL_GROUP(USBUART), ++ ASPEED_PINCTRL_GROUP(HBLED), ++ ASPEED_PINCTRL_GROUP(MACLINK0), ++ ASPEED_PINCTRL_GROUP(MACLINK1), ++ ASPEED_PINCTRL_GROUP(MACLINK2), ++ ASPEED_PINCTRL_GROUP(NCTS2), ++ ASPEED_PINCTRL_GROUP(NDCD2), ++ ASPEED_PINCTRL_GROUP(NDSR2), ++ ASPEED_PINCTRL_GROUP(NRI2), ++ ASPEED_PINCTRL_GROUP(NDTR2), ++ ASPEED_PINCTRL_GROUP(NRTS2), ++ ASPEED_PINCTRL_GROUP(SMON0), ++ ASPEED_PINCTRL_GROUP(SMON1), ++ ASPEED_PINCTRL_GROUP(SGMII), ++ ASPEED_PINCTRL_GROUP(PE2SGRSTN), ++ ASPEED_PINCTRL_GROUP(USB2CUD), ++ ASPEED_PINCTRL_GROUP(USB2CD), ++ ASPEED_PINCTRL_GROUP(USB2CH), ++ ASPEED_PINCTRL_GROUP(USB2CU), ++ ASPEED_PINCTRL_GROUP(USB2DD), ++ ASPEED_PINCTRL_GROUP(USB2DH), ++}; ++ ++FUNC_DECL_(ESPI0, "ESPI0"); ++FUNC_DECL_(ESPI1, "ESPI1"); ++FUNC_DECL_(LPC0, "LPC0"); ++FUNC_DECL_(LPC1, "LPC1"); ++FUNC_DECL_(VPI, "VPI"); ++FUNC_DECL_(SD, "SD"); ++FUNC_DECL_(OSCCLK, "OSCCLK"); ++FUNC_DECL_(TACH0, "TACH0"); ++FUNC_DECL_(TACH1, "TACH1"); ++FUNC_DECL_(TACH2, "TACH2"); ++FUNC_DECL_(TACH3, "TACH3"); ++FUNC_DECL_(TACH4, "TACH4"); ++FUNC_DECL_(TACH5, "TACH5"); ++FUNC_DECL_(TACH6, "TACH6"); ++FUNC_DECL_(TACH7, "TACH7"); ++FUNC_DECL_(THRU0, "THRU0"); ++FUNC_DECL_(THRU1, "THRU1"); ++FUNC_DECL_(NTCS5, "NTCS5"); ++FUNC_DECL_(NDSR5, "NDSR5"); ++FUNC_DECL_(NRI5, "NRI5"); ++FUNC_DECL_(TACH8, "TACH8"); ++FUNC_DECL_(TACH9, "TACH9"); ++FUNC_DECL_(TACH10, "TACH10"); ++FUNC_DECL_(TACH11, "TACH11"); ++FUNC_DECL_(TACH12, "TACH12"); ++FUNC_DECL_(TACH13, "TACH13"); ++FUNC_DECL_(TACH14, "TACH14"); ++FUNC_DECL_(TACH15, "TACH15"); ++FUNC_DECL_(SALT12, "SALT12"); ++FUNC_DECL_(SALT13, "SALT13"); ++FUNC_DECL_(SALT14, "SALT14"); ++FUNC_DECL_(SALT15, "SALT15"); ++FUNC_DECL_(SPIM0, "SPIM0"); ++FUNC_DECL_(PWM0, "PWM0"); ++FUNC_DECL_(PWM1, "PWM1"); ++FUNC_DECL_(PWM2, "PWM2"); ++FUNC_DECL_(PWM3, "PWM3"); ++FUNC_DECL_(PWM4, "PWM4"); ++FUNC_DECL_(PWM5, "PWM5"); ++FUNC_DECL_(PWM6, "PWM6"); ++FUNC_DECL_(PWM7, "PWM7"); ++FUNC_DECL_(SIOPBON0, "SIOPBON0"); ++FUNC_DECL_(SIOPBIN0, "SIOPBIN0"); ++FUNC_DECL_(SIOSCIN0, "SIOSCIN0"); ++FUNC_DECL_(SIOS3N0, "SIOS3N0"); ++FUNC_DECL_(SIOS5N0, "SIOS5N0"); ++FUNC_DECL_(SIOPWREQN0, "SIOPWREQN0"); ++FUNC_DECL_(SIOONCTRLN0, "SIOONCTRLN0"); ++FUNC_DECL_(SIOPWRGD0, "SIOPWRGD0"); ++FUNC_DECL_(UART0, "NCTS0", "NDCD0", "NDSR0", "NRI0", "NDTR0", "NRTS0", "TXD0", "RXD0"); ++FUNC_DECL_(UART1, "NCTS1", "NDCD1", "NDSR1", "NRI1", "NDTR1", "NRTS1", "TXD1", "RXD1"); ++FUNC_DECL_(UART2, "TXD2", "RXD2"); ++FUNC_DECL_(UART3, "TXD3", "RXD3"); ++FUNC_DECL_(UART5, "NCTS5", "NDCD5", "NDSR5", "NRI5", "NDTR5", "NRTS5", "TXD5", "RXD5"); ++FUNC_DECL_(UART6, "NCTS6", "NDCD6", "NDSR6", "NRI6", "NDTR6", "NRTS6", 
"TXD6", "RXD6"); ++FUNC_DECL_(UART7, "TXD7", "RXD7"); ++FUNC_DECL_(UART8, "TXD8", "RXD8"); ++FUNC_DECL_(UART9, "TXD9", "RXD9"); ++FUNC_DECL_(UART10, "TXD10", "RXD10"); ++FUNC_DECL_(UART11, "TXD11", "RXD11"); ++FUNC_DECL_(SPIM1, "SPIM1"); ++FUNC_DECL_(SPIM2, "SPIM2"); ++FUNC_DECL_(PWM8, "PWM8"); ++FUNC_DECL_(PWM9, "PWM9"); ++FUNC_DECL_(PWM10, "PWM10"); ++FUNC_DECL_(PWM11, "PWM11"); ++FUNC_DECL_(PWM12, "PWM12"); ++FUNC_DECL_(PWM13, "PWM13"); ++FUNC_DECL_(PWM14, "PWM14"); ++FUNC_DECL_(PWM15, "PWM15"); ++FUNC_DECL_(WDTRST0N, "WDTRST0N"); ++FUNC_DECL_(WDTRST1N, "WDTRST1N"); ++FUNC_DECL_(WDTRST2N, "WDTRST2N"); ++FUNC_DECL_(WDTRST3N, "WDTRST3N"); ++FUNC_DECL_(WDTRST4N, "WDTRST4N"); ++FUNC_DECL_(WDTRST5N, "WDTRST5N"); ++FUNC_DECL_(WDTRST6N, "WDTRST6N"); ++FUNC_DECL_(WDTRST7N, "WDTRST7N"); ++FUNC_DECL_(FSI0, "FSI0"); ++FUNC_DECL_(FSI1, "FSI1"); ++FUNC_DECL_(FSI2, "FSI2"); ++FUNC_DECL_(FSI3, "FSI3"); ++FUNC_DECL_(SALT0, "SALT0"); ++FUNC_DECL_(SALT1, "SALT1"); ++FUNC_DECL_(SALT2, "SALT2"); ++FUNC_DECL_(SALT3, "SALT3"); ++FUNC_DECL_(SALT4, "SALT4"); ++FUNC_DECL_(SALT5, "SALT5"); ++FUNC_DECL_(SALT6, "SALT6"); ++FUNC_DECL_(SALT7, "SALT7"); ++FUNC_DECL_(SALT8, "SALT8"); ++FUNC_DECL_(SALT9, "SALT9"); ++FUNC_DECL_(SALT10, "SALT10"); ++FUNC_DECL_(SALT11, "SALT11"); ++FUNC_DECL_(ADC0, "ADC0"); ++FUNC_DECL_(ADC1, "ADC1"); ++FUNC_DECL_(ADC2, "ADC2"); ++FUNC_DECL_(ADC3, "ADC3"); ++FUNC_DECL_(ADC4, "ADC4"); ++FUNC_DECL_(ADC5, "ADC5"); ++FUNC_DECL_(ADC6, "ADC6"); ++FUNC_DECL_(ADC7, "ADC7"); ++FUNC_DECL_(ADC8, "ADC8"); ++FUNC_DECL_(ADC9, "ADC9"); ++FUNC_DECL_(ADC10, "ADC10"); ++FUNC_DECL_(ADC11, "ADC11"); ++FUNC_DECL_(ADC12, "ADC12"); ++FUNC_DECL_(ADC13, "ADC13"); ++FUNC_DECL_(ADC14, "ADC14"); ++FUNC_DECL_(ADC15, "ADC15"); ++FUNC_DECL_(AUXPWRGOOD0, "AUXPWRGOOD0"); ++FUNC_DECL_(AUXPWRGOOD1, "AUXPWRGOOD1"); ++FUNC_DECL_(SGPM0, "SGPM0", "DSGPM0"); ++FUNC_DECL_(SGPM1, "SGPM1"); ++FUNC_DECL_(LTPI_PS_I2C0, "LTPI_PS_I2C0"); ++FUNC_DECL_(LTPI_PS_I2C1, "LTPI_PS_I2C1"); ++FUNC_DECL_(LTPI_PS_I2C2, "LTPI_PS_I2C2"); ++FUNC_DECL_(LTPI_PS_I2C3, "LTPI_PS_I2C3"); ++FUNC_DECL_(I2C0, "I2C0", "DI2C0"); ++FUNC_DECL_(I2C1, "I2C1", "DI2C1"); ++FUNC_DECL_(I2C2, "I2C2", "DI2C2"); ++FUNC_DECL_(I2C3, "I2C3", "DI2C3"); ++FUNC_DECL_(I2C4, "I2C4"); ++FUNC_DECL_(I2C5, "I2C5"); ++FUNC_DECL_(I2C6, "I2C6"); ++FUNC_DECL_(I2C7, "I2C7"); ++FUNC_DECL_(I2C8, "I2C8", "DI2C8"); ++FUNC_DECL_(I2C9, "I2C9", "DI2C9"); ++FUNC_DECL_(I2C10, "I2C10", "DI2C10"); ++FUNC_DECL_(I2C11, "I2C11", "DI2C11"); ++FUNC_DECL_(I2C12, "I2C12", "DI2C12"); ++FUNC_DECL_(I2C13, "I2C13", "DI2C13"); ++FUNC_DECL_(I2C14, "I2C14", "DI2C14"); ++FUNC_DECL_(I2C15, "I2C15", "DI2C15"); ++FUNC_DECL_(SIOPBON1, "SIOPBON1"); ++FUNC_DECL_(SIOPBIN1, "SIOPBIN1"); ++FUNC_DECL_(SIOSCIN1, "SIOSCIN1"); ++FUNC_DECL_(SIOS3N1, "SIOS3N1"); ++FUNC_DECL_(SIOS5N1, "SIOS5N1"); ++FUNC_DECL_(SIOPWREQN1, "SIOPWREQN1"); ++FUNC_DECL_(SIOONCTRLN1, "SIOONCTRLN1"); ++FUNC_DECL_(SIOPWRGD1, "SIOPWRGD1"); ++FUNC_DECL_(I3C0, "HVI3C0"); ++FUNC_DECL_(I3C1, "HVI3C1"); ++FUNC_DECL_(I3C2, "HVI3C2"); ++FUNC_DECL_(I3C3, "HVI3C3"); ++FUNC_DECL_(I3C4, "I3C4"); ++FUNC_DECL_(I3C5, "I3C5"); ++FUNC_DECL_(I3C6, "I3C6"); ++FUNC_DECL_(I3C7, "I3C7"); ++FUNC_DECL_(I3C8, "I3C8"); ++FUNC_DECL_(I3C9, "I3C9"); ++FUNC_DECL_(I3C10, "I3C10"); ++FUNC_DECL_(I3C11, "I3C11"); ++FUNC_DECL_(I3C12, "HVI3C12"); ++FUNC_DECL_(I3C13, "HVI3C13"); ++FUNC_DECL_(I3C14, "HVI3C14"); ++FUNC_DECL_(I3C15, "HVI3C15"); ++FUNC_DECL_(LTPI, "LTPI"); ++FUNC_DECL_(SPI0, "SPI0"); ++FUNC_DECL_(QSPI0, "QSPI0"); ++FUNC_DECL_(SPI0CS1, "SPI0CS1"); ++FUNC_DECL_(SPI0ABR, 
"SPI0ABR"); ++FUNC_DECL_(SPI0WPN, "SPI0WPN"); ++FUNC_DECL_(SPI1, "SPI1"); ++FUNC_DECL_(QSPI1, "QSPI1"); ++FUNC_DECL_(SPI1CS1, "SPI1CS1"); ++FUNC_DECL_(SPI1ABR, "SPI1ABR"); ++FUNC_DECL_(SPI1WPN, "SPI1WPN"); ++FUNC_DECL_(SPI2, "SPI2"); ++FUNC_DECL_(QSPI2, "QSPI2"); ++FUNC_DECL_(SPI2CS1, "SPI2CS1"); ++FUNC_DECL_(THRU2, "THRU2"); ++FUNC_DECL_(THRU3, "THRU3"); ++FUNC_DECL_(JTAGM1, "JTAGM1"); ++FUNC_DECL_(MDIO0, "MDIO0"); ++FUNC_DECL_(MDIO1, "MDIO1"); ++FUNC_DECL_(MDIO2, "MDIO2"); ++FUNC_DECL_(FWQSPI, "FWQSPI"); ++FUNC_DECL_(FWSPIABR, "FWSPIABR"); ++FUNC_DECL_(FWSPIWPN, "FWSPIWPN"); ++FUNC_DECL_(RGMII0, "RGMII0"); ++FUNC_DECL_(RGMII1, "RGMII1"); ++FUNC_DECL_(RMII0, "RMII0"); ++FUNC_DECL_(RMII0RCLKO, "RMII0RCLKO"); ++FUNC_DECL_(RMII1, "RMII1"); ++FUNC_DECL_(RMII1RCLKO, "RMII1RCLKO"); ++FUNC_DECL_(VGA, "VGA"); ++FUNC_DECL_(SGPS, "SGPS"); ++FUNC_DECL_(I2CF0, "I2CF0"); ++FUNC_DECL_(I2CF1, "I2CF1"); ++FUNC_DECL_(I2CF2, "I2CF2"); ++FUNC_DECL_(CANBUS, "CANBUS"); ++FUNC_DECL_(USBUART, "USBUART"); ++FUNC_DECL_(HBLED, "HBLED"); ++FUNC_DECL_(MACLINK0, "MACLINK0"); ++FUNC_DECL_(MACLINK1, "MACLINK1"); ++FUNC_DECL_(MACLINK2, "MACLINK2"); ++FUNC_DECL_(SMON0, "SMON0"); ++FUNC_DECL_(SMON1, "SMON1"); ++FUNC_DECL_(SGMII, "SGMII"); ++FUNC_DECL_(PCIERC, "PE2SGRSTN"); ++FUNC_DECL_(USB2C, "USB2CUD", "USB2CD", "USB2CH", "USB2CU"); ++FUNC_DECL_(USB2D, "USB2DD", "USB2DH"); ++ ++static struct aspeed_pin_function aspeed_g7_soc1_funcs[] = { ++ ASPEED_PINCTRL_FUNC(ESPI0), ++ ASPEED_PINCTRL_FUNC(ESPI1), ++ ASPEED_PINCTRL_FUNC(LPC0), ++ ASPEED_PINCTRL_FUNC(LPC1), ++ ASPEED_PINCTRL_FUNC(VPI), ++ ASPEED_PINCTRL_FUNC(SD), ++ ASPEED_PINCTRL_FUNC(OSCCLK), ++ ASPEED_PINCTRL_FUNC(TACH0), ++ ASPEED_PINCTRL_FUNC(TACH1), ++ ASPEED_PINCTRL_FUNC(TACH2), ++ ASPEED_PINCTRL_FUNC(TACH3), ++ ASPEED_PINCTRL_FUNC(TACH4), ++ ASPEED_PINCTRL_FUNC(TACH5), ++ ASPEED_PINCTRL_FUNC(TACH6), ++ ASPEED_PINCTRL_FUNC(TACH7), ++ ASPEED_PINCTRL_FUNC(THRU0), ++ ASPEED_PINCTRL_FUNC(THRU1), ++ ASPEED_PINCTRL_FUNC(NTCS5), ++ ASPEED_PINCTRL_FUNC(NTCS5), ++ ASPEED_PINCTRL_FUNC(NDSR5), ++ ASPEED_PINCTRL_FUNC(NRI5), ++ ASPEED_PINCTRL_FUNC(NRI5), ++ ASPEED_PINCTRL_FUNC(SALT12), ++ ASPEED_PINCTRL_FUNC(SALT13), ++ ASPEED_PINCTRL_FUNC(SALT14), ++ ASPEED_PINCTRL_FUNC(SALT15), ++ ASPEED_PINCTRL_FUNC(TACH8), ++ ASPEED_PINCTRL_FUNC(TACH9), ++ ASPEED_PINCTRL_FUNC(TACH10), ++ ASPEED_PINCTRL_FUNC(TACH11), ++ ASPEED_PINCTRL_FUNC(TACH12), ++ ASPEED_PINCTRL_FUNC(TACH13), ++ ASPEED_PINCTRL_FUNC(TACH14), ++ ASPEED_PINCTRL_FUNC(TACH15), ++ ASPEED_PINCTRL_FUNC(SPIM0), ++ ASPEED_PINCTRL_FUNC(PWM0), ++ ASPEED_PINCTRL_FUNC(PWM1), ++ ASPEED_PINCTRL_FUNC(PWM2), ++ ASPEED_PINCTRL_FUNC(PWM3), ++ ASPEED_PINCTRL_FUNC(PWM4), ++ ASPEED_PINCTRL_FUNC(PWM5), ++ ASPEED_PINCTRL_FUNC(PWM6), ++ ASPEED_PINCTRL_FUNC(PWM7), ++ ASPEED_PINCTRL_FUNC(SIOPBON0), ++ ASPEED_PINCTRL_FUNC(SIOPBIN0), ++ ASPEED_PINCTRL_FUNC(SIOSCIN0), ++ ASPEED_PINCTRL_FUNC(SIOS3N0), ++ ASPEED_PINCTRL_FUNC(SIOS5N0), ++ ASPEED_PINCTRL_FUNC(SIOPWREQN0), ++ ASPEED_PINCTRL_FUNC(SIOONCTRLN0), ++ ASPEED_PINCTRL_FUNC(SIOPWRGD0), ++ ASPEED_PINCTRL_FUNC(UART0), ++ ASPEED_PINCTRL_FUNC(UART1), ++ ASPEED_PINCTRL_FUNC(UART2), ++ ASPEED_PINCTRL_FUNC(UART3), ++ ASPEED_PINCTRL_FUNC(UART5), ++ ASPEED_PINCTRL_FUNC(UART6), ++ ASPEED_PINCTRL_FUNC(UART7), ++ ASPEED_PINCTRL_FUNC(UART8), ++ ASPEED_PINCTRL_FUNC(UART9), ++ ASPEED_PINCTRL_FUNC(UART10), ++ ASPEED_PINCTRL_FUNC(UART11), ++ ASPEED_PINCTRL_FUNC(SPIM1), ++ ASPEED_PINCTRL_FUNC(PWM7), ++ ASPEED_PINCTRL_FUNC(PWM8), ++ ASPEED_PINCTRL_FUNC(PWM9), ++ ASPEED_PINCTRL_FUNC(PWM10), ++ 
ASPEED_PINCTRL_FUNC(PWM11), ++ ASPEED_PINCTRL_FUNC(PWM12), ++ ASPEED_PINCTRL_FUNC(PWM13), ++ ASPEED_PINCTRL_FUNC(PWM14), ++ ASPEED_PINCTRL_FUNC(PWM15), ++ ASPEED_PINCTRL_FUNC(WDTRST0N), ++ ASPEED_PINCTRL_FUNC(WDTRST1N), ++ ASPEED_PINCTRL_FUNC(WDTRST2N), ++ ASPEED_PINCTRL_FUNC(WDTRST3N), ++ ASPEED_PINCTRL_FUNC(WDTRST4N), ++ ASPEED_PINCTRL_FUNC(WDTRST5N), ++ ASPEED_PINCTRL_FUNC(WDTRST6N), ++ ASPEED_PINCTRL_FUNC(WDTRST7N), ++ ASPEED_PINCTRL_FUNC(FSI0), ++ ASPEED_PINCTRL_FUNC(FSI1), ++ ASPEED_PINCTRL_FUNC(FSI2), ++ ASPEED_PINCTRL_FUNC(FSI3), ++ ASPEED_PINCTRL_FUNC(SALT0), ++ ASPEED_PINCTRL_FUNC(SALT1), ++ ASPEED_PINCTRL_FUNC(SALT2), ++ ASPEED_PINCTRL_FUNC(SALT3), ++ ASPEED_PINCTRL_FUNC(SALT4), ++ ASPEED_PINCTRL_FUNC(SALT5), ++ ASPEED_PINCTRL_FUNC(SALT6), ++ ASPEED_PINCTRL_FUNC(SALT7), ++ ASPEED_PINCTRL_FUNC(SALT8), ++ ASPEED_PINCTRL_FUNC(SALT9), ++ ASPEED_PINCTRL_FUNC(SALT10), ++ ASPEED_PINCTRL_FUNC(SALT11), ++ ASPEED_PINCTRL_FUNC(ADC0), ++ ASPEED_PINCTRL_FUNC(ADC1), ++ ASPEED_PINCTRL_FUNC(ADC2), ++ ASPEED_PINCTRL_FUNC(ADC3), ++ ASPEED_PINCTRL_FUNC(ADC4), ++ ASPEED_PINCTRL_FUNC(ADC5), ++ ASPEED_PINCTRL_FUNC(ADC6), ++ ASPEED_PINCTRL_FUNC(ADC7), ++ ASPEED_PINCTRL_FUNC(ADC8), ++ ASPEED_PINCTRL_FUNC(ADC9), ++ ASPEED_PINCTRL_FUNC(ADC10), ++ ASPEED_PINCTRL_FUNC(ADC11), ++ ASPEED_PINCTRL_FUNC(ADC12), ++ ASPEED_PINCTRL_FUNC(ADC13), ++ ASPEED_PINCTRL_FUNC(ADC14), ++ ASPEED_PINCTRL_FUNC(ADC15), ++ ASPEED_PINCTRL_FUNC(AUXPWRGOOD0), ++ ASPEED_PINCTRL_FUNC(AUXPWRGOOD1), ++ ASPEED_PINCTRL_FUNC(SGPM0), ++ ASPEED_PINCTRL_FUNC(SGPM1), ++ ASPEED_PINCTRL_FUNC(SPIM2), ++ ASPEED_PINCTRL_FUNC(LTPI_PS_I2C0), ++ ASPEED_PINCTRL_FUNC(LTPI_PS_I2C1), ++ ASPEED_PINCTRL_FUNC(LTPI_PS_I2C2), ++ ASPEED_PINCTRL_FUNC(LTPI_PS_I2C3), ++ ASPEED_PINCTRL_FUNC(I2C0), ++ ASPEED_PINCTRL_FUNC(I2C1), ++ ASPEED_PINCTRL_FUNC(I2C2), ++ ASPEED_PINCTRL_FUNC(I2C3), ++ ASPEED_PINCTRL_FUNC(I2C4), ++ ASPEED_PINCTRL_FUNC(I2C5), ++ ASPEED_PINCTRL_FUNC(I2C6), ++ ASPEED_PINCTRL_FUNC(I2C7), ++ ASPEED_PINCTRL_FUNC(I2C8), ++ ASPEED_PINCTRL_FUNC(I2C9), ++ ASPEED_PINCTRL_FUNC(I2C10), ++ ASPEED_PINCTRL_FUNC(I2C11), ++ ASPEED_PINCTRL_FUNC(I2C12), ++ ASPEED_PINCTRL_FUNC(I2C13), ++ ASPEED_PINCTRL_FUNC(I2C14), ++ ASPEED_PINCTRL_FUNC(I2C15), ++ ASPEED_PINCTRL_FUNC(SIOPBON1), ++ ASPEED_PINCTRL_FUNC(SIOPBIN1), ++ ASPEED_PINCTRL_FUNC(SIOSCIN1), ++ ASPEED_PINCTRL_FUNC(SIOS3N1), ++ ASPEED_PINCTRL_FUNC(SIOS5N1), ++ ASPEED_PINCTRL_FUNC(SIOPWREQN1), ++ ASPEED_PINCTRL_FUNC(SIOONCTRLN1), ++ ASPEED_PINCTRL_FUNC(SIOPWRGD1), ++ ASPEED_PINCTRL_FUNC(I3C0), ++ ASPEED_PINCTRL_FUNC(I3C1), ++ ASPEED_PINCTRL_FUNC(I3C2), ++ ASPEED_PINCTRL_FUNC(I3C3), ++ ASPEED_PINCTRL_FUNC(I3C4), ++ ASPEED_PINCTRL_FUNC(I3C5), ++ ASPEED_PINCTRL_FUNC(I3C6), ++ ASPEED_PINCTRL_FUNC(I3C7), ++ ASPEED_PINCTRL_FUNC(I3C8), ++ ASPEED_PINCTRL_FUNC(I3C9), ++ ASPEED_PINCTRL_FUNC(I3C10), ++ ASPEED_PINCTRL_FUNC(I3C11), ++ ASPEED_PINCTRL_FUNC(I3C12), ++ ASPEED_PINCTRL_FUNC(I3C13), ++ ASPEED_PINCTRL_FUNC(I3C14), ++ ASPEED_PINCTRL_FUNC(I3C15), ++ ASPEED_PINCTRL_FUNC(LTPI), ++ ASPEED_PINCTRL_FUNC(SPI0), ++ ASPEED_PINCTRL_FUNC(QSPI0), ++ ASPEED_PINCTRL_FUNC(SPI0CS1), ++ ASPEED_PINCTRL_FUNC(SPI0ABR), ++ ASPEED_PINCTRL_FUNC(SPI0WPN), ++ ASPEED_PINCTRL_FUNC(SPI1), ++ ASPEED_PINCTRL_FUNC(QSPI1), ++ ASPEED_PINCTRL_FUNC(SPI1CS1), ++ ASPEED_PINCTRL_FUNC(SPI1ABR), ++ ASPEED_PINCTRL_FUNC(SPI1WPN), ++ ASPEED_PINCTRL_FUNC(SPI2), ++ ASPEED_PINCTRL_FUNC(QSPI2), ++ ASPEED_PINCTRL_FUNC(SPI2CS1), ++ ASPEED_PINCTRL_FUNC(THRU2), ++ ASPEED_PINCTRL_FUNC(THRU3), ++ ASPEED_PINCTRL_FUNC(JTAGM1), ++ ASPEED_PINCTRL_FUNC(MDIO0), ++ 
ASPEED_PINCTRL_FUNC(MDIO1), ++ ASPEED_PINCTRL_FUNC(MDIO2), ++ ASPEED_PINCTRL_FUNC(FWQSPI), ++ ASPEED_PINCTRL_FUNC(FWSPIABR), ++ ASPEED_PINCTRL_FUNC(FWSPIWPN), ++ ASPEED_PINCTRL_FUNC(RGMII0), ++ ASPEED_PINCTRL_FUNC(RGMII1), ++ ASPEED_PINCTRL_FUNC(RMII0), ++ ASPEED_PINCTRL_FUNC(RMII0RCLKO), ++ ASPEED_PINCTRL_FUNC(RMII1), ++ ASPEED_PINCTRL_FUNC(RMII1RCLKO), ++ ASPEED_PINCTRL_FUNC(VGA), ++ ASPEED_PINCTRL_FUNC(SGPS), ++ ASPEED_PINCTRL_FUNC(I2CF0), ++ ASPEED_PINCTRL_FUNC(I2CF1), ++ ASPEED_PINCTRL_FUNC(I2CF2), ++ ASPEED_PINCTRL_FUNC(CANBUS), ++ ASPEED_PINCTRL_FUNC(USBUART), ++ ASPEED_PINCTRL_FUNC(HBLED), ++ ASPEED_PINCTRL_FUNC(MACLINK0), ++ ASPEED_PINCTRL_FUNC(MACLINK1), ++ ASPEED_PINCTRL_FUNC(MACLINK2), ++ ASPEED_PINCTRL_FUNC(SMON0), ++ ASPEED_PINCTRL_FUNC(SMON1), ++ ASPEED_PINCTRL_FUNC(SGMII), ++ ASPEED_PINCTRL_FUNC(PCIERC), ++ ASPEED_PINCTRL_FUNC(USB2C), ++ ASPEED_PINCTRL_FUNC(USB2D), ++}; ++ ++/* number, name, drv_data */ ++static const struct pinctrl_pin_desc aspeed_g7_soc1_pins[] = { ++ PINCTRL_PIN(C16, "C16"), ++ PINCTRL_PIN(C14, "C14"), ++ PINCTRL_PIN(C11, "C11"), ++ PINCTRL_PIN(D9, "D9"), ++ PINCTRL_PIN(F14, "F14"), ++ PINCTRL_PIN(D10, "D10"), ++ PINCTRL_PIN(C12, "C12"), ++ PINCTRL_PIN(C13, "C13"), ++ PINCTRL_PIN(AC26, "AC26"), ++ PINCTRL_PIN(AA25, "AA25"), ++ PINCTRL_PIN(AB23, "AB23"), ++ PINCTRL_PIN(U22, "U22"), ++ PINCTRL_PIN(V21, "V21"), ++ PINCTRL_PIN(N26, "N26"), ++ PINCTRL_PIN(P25, "P25"), ++ PINCTRL_PIN(N25, "N25"), ++ PINCTRL_PIN(V23, "V23"), ++ PINCTRL_PIN(W22, "W22"), ++ PINCTRL_PIN(AB26, "AB26"), ++ PINCTRL_PIN(AD26, "AD26"), ++ PINCTRL_PIN(P26, "P26"), ++ PINCTRL_PIN(AE26, "AE26"), ++ PINCTRL_PIN(AF26, "AF26"), ++ PINCTRL_PIN(AF25, "AF25"), ++ PINCTRL_PIN(AE25, "AE25"), ++ PINCTRL_PIN(AD25, "AD25"), ++ PINCTRL_PIN(AF23, "AF23"), ++ PINCTRL_PIN(AF20, "AF20"), ++ PINCTRL_PIN(AF21, "AF21"), ++ PINCTRL_PIN(AE21, "AE21"), ++ PINCTRL_PIN(AE23, "AE23"), ++ PINCTRL_PIN(AD22, "AD22"), ++ PINCTRL_PIN(AF17, "AF17"), ++ PINCTRL_PIN(AA16, "AA16"), ++ PINCTRL_PIN(Y16, "Y16"), ++ PINCTRL_PIN(V17, "V17"), ++ PINCTRL_PIN(J13, "J13"), ++ PINCTRL_PIN(AB16, "AB16"), ++ PINCTRL_PIN(AC16, "AC16"), ++ PINCTRL_PIN(AF16, "AF16"), ++ PINCTRL_PIN(AA15, "AA15"), ++ PINCTRL_PIN(AB15, "AB15"), ++ PINCTRL_PIN(AC15, "AC15"), ++ PINCTRL_PIN(AD15, "AD15"), ++ PINCTRL_PIN(Y15, "Y15"), ++ PINCTRL_PIN(AA14, "AA14"), ++ PINCTRL_PIN(W16, "W16"), ++ PINCTRL_PIN(V16, "V16"), ++ PINCTRL_PIN(AB18, "AB18"), ++ PINCTRL_PIN(AC18, "AC18"), ++ PINCTRL_PIN(K13, "K13"), ++ PINCTRL_PIN(AA17, "AA17"), ++ PINCTRL_PIN(AB17, "AB17"), ++ PINCTRL_PIN(AD16, "AD16"), ++ PINCTRL_PIN(AC17, "AC17"), ++ PINCTRL_PIN(AD17, "AD17"), ++ PINCTRL_PIN(AE16, "AE16"), ++ PINCTRL_PIN(AE17, "AE17"), ++ PINCTRL_PIN(AB24, "AB24"), ++ PINCTRL_PIN(W26, "W26"), ++ PINCTRL_PIN(HOLE0, "HOLE0"), ++ PINCTRL_PIN(HOLE1, "HOLE1"), ++ PINCTRL_PIN(HOLE2, "HOLE2"), ++ PINCTRL_PIN(HOLE3, "HOLE3"), ++ PINCTRL_PIN(W25, "W25"), ++ PINCTRL_PIN(Y23, "Y23"), ++ PINCTRL_PIN(Y24, "Y24"), ++ PINCTRL_PIN(W21, "W21"), ++ PINCTRL_PIN(AA23, "AA23"), ++ PINCTRL_PIN(AC22, "AC22"), ++ PINCTRL_PIN(AB22, "AB22"), ++ PINCTRL_PIN(Y21, "Y21"), ++ PINCTRL_PIN(AE20, "AE20"), ++ PINCTRL_PIN(AF19, "AF19"), ++ PINCTRL_PIN(Y22, "Y22"), ++ PINCTRL_PIN(AA20, "AA20"), ++ PINCTRL_PIN(AA22, "AA22"), ++ PINCTRL_PIN(AB20, "AB20"), ++ PINCTRL_PIN(AF18, "AF18"), ++ PINCTRL_PIN(AE19, "AE19"), ++ PINCTRL_PIN(AD20, "AD20"), ++ PINCTRL_PIN(AC20, "AC20"), ++ PINCTRL_PIN(AA21, "AA21"), ++ PINCTRL_PIN(AB21, "AB21"), ++ PINCTRL_PIN(AC19, "AC19"), ++ PINCTRL_PIN(AE18, "AE18"), ++ PINCTRL_PIN(AD19, "AD19"), 
++ PINCTRL_PIN(AD18, "AD18"), ++ PINCTRL_PIN(U25, "U25"), ++ PINCTRL_PIN(U26, "U26"), ++ PINCTRL_PIN(Y26, "Y26"), ++ PINCTRL_PIN(AA24, "AA24"), ++ PINCTRL_PIN(R25, "R25"), ++ PINCTRL_PIN(AA26, "AA26"), ++ PINCTRL_PIN(R26, "R26"), ++ PINCTRL_PIN(Y25, "Y25"), ++ PINCTRL_PIN(B16, "B16"), ++ PINCTRL_PIN(D14, "D14"), ++ PINCTRL_PIN(B15, "B15"), ++ PINCTRL_PIN(B14, "B14"), ++ PINCTRL_PIN(C17, "C17"), ++ PINCTRL_PIN(B13, "B13"), ++ PINCTRL_PIN(E14, "E14"), ++ PINCTRL_PIN(C15, "C15"), ++ PINCTRL_PIN(D24, "D24"), ++ PINCTRL_PIN(B23, "B23"), ++ PINCTRL_PIN(B22, "B22"), ++ PINCTRL_PIN(C23, "C23"), ++ PINCTRL_PIN(B18, "B18"), ++ PINCTRL_PIN(B21, "B21"), ++ PINCTRL_PIN(M15, "M15"), ++ PINCTRL_PIN(B19, "B19"), ++ PINCTRL_PIN(B26, "B26"), ++ PINCTRL_PIN(A25, "A25"), ++ PINCTRL_PIN(A24, "A24"), ++ PINCTRL_PIN(B24, "B24"), ++ PINCTRL_PIN(E26, "E26"), ++ PINCTRL_PIN(A21, "A21"), ++ PINCTRL_PIN(A19, "A19"), ++ PINCTRL_PIN(A18, "A18"), ++ PINCTRL_PIN(D26, "D26"), ++ PINCTRL_PIN(C26, "C26"), ++ PINCTRL_PIN(A23, "A23"), ++ PINCTRL_PIN(A22, "A22"), ++ PINCTRL_PIN(B25, "B25"), ++ PINCTRL_PIN(F26, "F26"), ++ PINCTRL_PIN(A26, "A26"), ++ PINCTRL_PIN(A14, "A14"), ++ PINCTRL_PIN(E10, "E10"), ++ PINCTRL_PIN(E13, "E13"), ++ PINCTRL_PIN(D12, "D12"), ++ PINCTRL_PIN(F10, "F10"), ++ PINCTRL_PIN(E11, "E11"), ++ PINCTRL_PIN(F11, "F11"), ++ PINCTRL_PIN(F13, "F13"), ++ PINCTRL_PIN(N15, "N15"), ++ PINCTRL_PIN(C20, "C20"), ++ PINCTRL_PIN(C19, "C19"), ++ PINCTRL_PIN(A8, "A8"), ++ PINCTRL_PIN(R14, "R14"), ++ PINCTRL_PIN(A7, "A7"), ++ PINCTRL_PIN(P14, "P14"), ++ PINCTRL_PIN(D20, "D20"), ++ PINCTRL_PIN(A6, "A6"), ++ PINCTRL_PIN(B6, "B6"), ++ PINCTRL_PIN(N14, "N14"), ++ PINCTRL_PIN(B7, "B7"), ++ PINCTRL_PIN(B8, "B8"), ++ PINCTRL_PIN(B9, "B9"), ++ PINCTRL_PIN(M14, "M14"), ++ PINCTRL_PIN(J11, "J11"), ++ PINCTRL_PIN(E7, "E7"), ++ PINCTRL_PIN(D19, "D19"), ++ PINCTRL_PIN(B11, "B11"), ++ PINCTRL_PIN(D15, "D15"), ++ PINCTRL_PIN(B12, "B12"), ++ PINCTRL_PIN(B10, "B10"), ++ PINCTRL_PIN(P13, "P13"), ++ PINCTRL_PIN(C18, "C18"), ++ PINCTRL_PIN(C6, "C6"), ++ PINCTRL_PIN(C7, "C7"), ++ PINCTRL_PIN(D7, "D7"), ++ PINCTRL_PIN(N13, "N13"), ++ PINCTRL_PIN(C8, "C8"), ++ PINCTRL_PIN(C9, "C9"), ++ PINCTRL_PIN(C10, "C10"), ++ PINCTRL_PIN(M16, "M16"), ++ PINCTRL_PIN(A15, "A15"), ++ PINCTRL_PIN(G11, "G11"), ++ PINCTRL_PIN(H7, "H7"), ++ PINCTRL_PIN(H8, "H8"), ++ PINCTRL_PIN(H9, "H9"), ++ PINCTRL_PIN(H10, "H10"), ++ PINCTRL_PIN(H11, "H11"), ++ PINCTRL_PIN(J9, "J9"), ++ PINCTRL_PIN(J10, "J10"), ++ PINCTRL_PIN(E9, "E9"), ++ PINCTRL_PIN(F9, "F9"), ++ PINCTRL_PIN(F8, "F8"), ++ PINCTRL_PIN(M13, "M13"), ++ PINCTRL_PIN(F7, "F7"), ++ PINCTRL_PIN(D8, "D8"), ++ PINCTRL_PIN(E8, "E8"), ++ PINCTRL_PIN(L12, "L12"), ++ PINCTRL_PIN(F12, "F12"), ++ PINCTRL_PIN(E12, "E12"), ++ PINCTRL_PIN(J12, "J12"), ++ PINCTRL_PIN(G7, "G7"), ++ PINCTRL_PIN(G8, "G8"), ++ PINCTRL_PIN(G9, "G9"), ++ PINCTRL_PIN(G10, "G10"), ++ PINCTRL_PIN(K12, "K12"), ++ PINCTRL_PIN(W17, "W17"), ++ PINCTRL_PIN(V18, "V18"), ++ PINCTRL_PIN(W18, "W18"), ++ PINCTRL_PIN(Y17, "Y17"), ++ PINCTRL_PIN(AA18, "AA18"), ++ PINCTRL_PIN(AA13, "AA13"), ++ PINCTRL_PIN(Y18, "Y18"), ++ PINCTRL_PIN(AA12, "AA12"), ++ PINCTRL_PIN(W20, "W20"), ++ PINCTRL_PIN(V20, "V20"), ++ PINCTRL_PIN(Y11, "Y11"), ++ PINCTRL_PIN(V14, "V14"), ++ PINCTRL_PIN(V19, "V19"), ++ PINCTRL_PIN(W14, "W14"), ++ PINCTRL_PIN(Y20, "Y20"), ++ PINCTRL_PIN(AB19, "AB19"), ++ PINCTRL_PIN(U21, "U21"), ++ PINCTRL_PIN(T24, "T24"), ++ PINCTRL_PIN(V24, "V24"), ++ PINCTRL_PIN(V22, "V22"), ++ PINCTRL_PIN(T23, "T23"), ++ PINCTRL_PIN(AC25, "AC25"), ++ PINCTRL_PIN(AB25, "AB25"), ++ 
PINCTRL_PIN(AC24, "AC24"), ++ PINCTRL_PIN(SGMII0, "SGMII0"), ++ PINCTRL_PIN(PCIERC2_PERST, "PCIERC2_PERST"), ++ PINCTRL_PIN(PORTC_MODE, "PORTC_MODE"), ++ PINCTRL_PIN(PORTD_MODE, "PORTD_MODE"), ++}; ++ ++FUNCFG_DESCL(C16, PIN_CFG(ESPI1, SCU400, GENMASK(2, 0), 1), ++ PIN_CFG(LPC1, SCU400, GENMASK(2, 0), 2), ++ PIN_CFG(SD, SCU400, GENMASK(2, 0), 3), ++ PIN_CFG(DI2C0, SCU400, GENMASK(2, 0), 4), ++ PIN_CFG(VPI, SCU400, GENMASK(2, 0), 5)); ++FUNCFG_DESCL(C14, PIN_CFG(ESPI1, SCU400, GENMASK(6, 4), (1 << 4)), ++ PIN_CFG(LPC1, SCU400, GENMASK(6, 4), (2 << 4)), ++ PIN_CFG(SD, SCU400, GENMASK(6, 4), (3 << 4)), ++ PIN_CFG(DI2C1, SCU400, GENMASK(6, 4), (4 << 4)), ++ PIN_CFG(VPI, SCU400, GENMASK(6, 4), (5 << 4))); ++FUNCFG_DESCL(C11, PIN_CFG(ESPI1, SCU400, GENMASK(10, 8), (1 << 8)), ++ PIN_CFG(LPC1, SCU400, GENMASK(10, 8), (2 << 8)), ++ PIN_CFG(SD, SCU400, GENMASK(10, 8), (3 << 8)), ++ PIN_CFG(DI2C3, SCU400, GENMASK(10, 8), (4 << 8)), ++ PIN_CFG(VPI, SCU400, GENMASK(10, 8), (5 << 8))); ++FUNCFG_DESCL(D9, PIN_CFG(ESPI1, SCU400, GENMASK(14, 12), (1 << 12)), ++ PIN_CFG(LPC1, SCU400, GENMASK(14, 12), (2 << 12)), ++ PIN_CFG(SD, SCU400, GENMASK(14, 12), (3 << 12)), ++ PIN_CFG(DI2C0, SCU400, GENMASK(14, 12), (4 << 12)), ++ PIN_CFG(VPI, SCU400, GENMASK(14, 12), (5 << 12))); ++FUNCFG_DESCL(F14, PIN_CFG(ESPI1, SCU400, GENMASK(18, 16), (1 << 16)), ++ PIN_CFG(LPC1, SCU400, GENMASK(18, 16), (2 << 16)), ++ PIN_CFG(SD, SCU400, GENMASK(18, 16), (3 << 16)), ++ PIN_CFG(DI2C1, SCU400, GENMASK(18, 16), (4 << 16)), ++ PIN_CFG(VPI, SCU400, GENMASK(18, 16), (5 << 16))); ++FUNCFG_DESCL(D10, PIN_CFG(ESPI1, SCU400, GENMASK(22, 20), (1 << 20)), ++ PIN_CFG(LPC1, SCU400, GENMASK(22, 20), (2 << 20)), ++ PIN_CFG(SD, SCU400, GENMASK(22, 20), (3 << 20)), ++ PIN_CFG(DI2C2, SCU400, GENMASK(22, 20), (4 << 20)), ++ PIN_CFG(VPI, SCU400, GENMASK(22, 20), (5 << 20))); ++FUNCFG_DESCL(C12, PIN_CFG(ESPI1, SCU400, GENMASK(26, 24), (1 << 24)), ++ PIN_CFG(LPC1, SCU400, GENMASK(26, 24), (2 << 24)), ++ PIN_CFG(SD, SCU400, GENMASK(26, 24), (3 << 24)), ++ PIN_CFG(DI2C2, SCU400, GENMASK(26, 24), (4 << 24))); ++FUNCFG_DESCL(C13, PIN_CFG(ESPI1, SCU400, GENMASK(30, 28), (1 << 28)), ++ PIN_CFG(LPC1, SCU400, GENMASK(30, 28), (2 << 28)), ++ PIN_CFG(SD, SCU400, GENMASK(30, 28), (3 << 28)), ++ PIN_CFG(DI2C3, SCU400, GENMASK(30, 28), (4 << 28))); ++FUNCFG_DESCL(AC26, PIN_CFG(TACH0, SCU404, GENMASK(2, 0), 1), ++ PIN_CFG(THRU0, SCU404, GENMASK(2, 0), 2), ++ PIN_CFG(VPI, SCU404, GENMASK(2, 0), 3)); ++FUNCFG_DESCL(AA25, PIN_CFG(TACH1, SCU404, GENMASK(6, 4), (1 << 4)), ++ PIN_CFG(THRU0, SCU404, GENMASK(6, 4), (2 << 4)), ++ PIN_CFG(VPI, SCU404, GENMASK(6, 4), (3 << 4))); ++FUNCFG_DESCL(AB23, PIN_CFG(TACH2, SCU404, GENMASK(10, 8), (1 << 8)), ++ PIN_CFG(THRU1, SCU404, GENMASK(10, 8), (2 << 8)), ++ PIN_CFG(VPI, SCU404, GENMASK(10, 8), (3 << 8))); ++FUNCFG_DESCL(U22, PIN_CFG(TACH3, SCU404, GENMASK(14, 12), (1 << 12)), ++ PIN_CFG(THRU1, SCU404, GENMASK(14, 12), (2 << 12)), ++ PIN_CFG(VPI, SCU404, GENMASK(14, 12), (3 << 12))); ++FUNCFG_DESCL(V21, PIN_CFG(TACH4, SCU404, GENMASK(18, 16), (1 << 16)), ++ PIN_CFG(VPI, SCU404, GENMASK(18, 16), (3 << 16)), ++ PIN_CFG(NCTS5, SCU404, GENMASK(18, 16), (4 << 16))); ++FUNCFG_DESCL(N26, PIN_CFG(TACH5, SCU404, GENMASK(22, 20), (1 << 20)), ++ PIN_CFG(VPI, SCU404, GENMASK(22, 20), (3 << 20)), ++ PIN_CFG(NDCD5, SCU404, GENMASK(22, 20), (4 << 20))); ++FUNCFG_DESCL(P25, PIN_CFG(TACH6, SCU404, GENMASK(26, 24), (1 << 24)), ++ PIN_CFG(VPI, SCU404, GENMASK(26, 24), (3 << 24)), ++ PIN_CFG(NDSR5, SCU404, GENMASK(26, 24), (4 << 24))); 
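++/*
++ * Ball mux descriptors (applies to the whole FUNCFG_DESCL table above and
++ * below): each PIN_CFG(<signal>, <SCU register>, <field mask>, <field value>)
++ * entry names one alternate signal for the ball, the SCU pin-mux register it
++ * is selected in, the bit field used by this ball, and the select value
++ * already shifted to the field position (e.g. (2 << 8) for GENMASK(10, 8)).
++ */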
++FUNCFG_DESCL(N25, PIN_CFG(TACH7, SCU404, GENMASK(30, 28), (1 << 28)), ++ PIN_CFG(VPI, SCU404, GENMASK(30, 28), (3 << 28)), ++ PIN_CFG(NRI5, SCU404, GENMASK(30, 28), (4 << 28))); ++FUNCFG_DESCL(V23, PIN_CFG(TACH8, SCU408, GENMASK(2, 0), 1), ++ PIN_CFG(VPI, SCU408, GENMASK(2, 0), 3), ++ PIN_CFG(NDTR5, SCU408, GENMASK(2, 0), 4)); ++FUNCFG_DESCL(W22, PIN_CFG(TACH9, SCU408, GENMASK(6, 4), (1 << 4)), ++ PIN_CFG(VPI, SCU408, GENMASK(6, 4), (3 << 4)), ++ PIN_CFG(NRTS5, SCU408, GENMASK(6, 4), (4 << 4))); ++FUNCFG_DESCL(AB26, PIN_CFG(TACH10, SCU408, GENMASK(10, 8), (1 << 8)), ++ PIN_CFG(SALT12, SCU408, GENMASK(10, 8), (2 << 8)), ++ PIN_CFG(VPI, SCU408, GENMASK(10, 8), (3 << 8)), ++ PIN_CFG(NCTS6, SCU408, GENMASK(10, 8), (4 << 8))); ++FUNCFG_DESCL(AD26, PIN_CFG(TACH11, SCU408, GENMASK(14, 12), (1 << 12)), ++ PIN_CFG(SALT13, SCU408, GENMASK(14, 12), (2 << 12)), ++ PIN_CFG(VPI, SCU408, GENMASK(14, 12), (3 << 12)), ++ PIN_CFG(NDCD6, SCU408, GENMASK(14, 12), (4 << 12))); ++FUNCFG_DESCL(P26, PIN_CFG(TACH12, SCU408, GENMASK(18, 16), (1 << 16)), ++ PIN_CFG(SALT14, SCU408, GENMASK(18, 16), (2 << 16)), ++ PIN_CFG(VPI, SCU408, GENMASK(18, 16), (3 << 16)), ++ PIN_CFG(NDSR6, SCU408, GENMASK(18, 16), (4 << 16))); ++FUNCFG_DESCL(AE26, PIN_CFG(TACH13, SCU408, GENMASK(22, 20), (1 << 20)), ++ PIN_CFG(SALT15, SCU408, GENMASK(22, 20), (2 << 20)), ++ PIN_CFG(VPI, SCU408, GENMASK(22, 20), (3 << 20)), ++ PIN_CFG(NRI6, SCU408, GENMASK(22, 20), (4 << 20))); ++FUNCFG_DESCL(AF26, PIN_CFG(TACH14, SCU408, GENMASK(26, 24), (1 << 24)), ++ PIN_CFG(LPC0, SCU408, GENMASK(26, 24), (2 << 24)), ++ PIN_CFG(VPI, SCU408, GENMASK(26, 24), (3 << 24)), ++ PIN_CFG(NDTR6, SCU408, GENMASK(26, 24), (4 << 24))); ++FUNCFG_DESCL(AF25, PIN_CFG(TACH15, SCU408, GENMASK(30, 28), (1 << 28)), ++ PIN_CFG(LPC0, SCU408, GENMASK(30, 28), (2 << 28)), ++ PIN_CFG(VPI, SCU408, GENMASK(30, 28), (3 << 28)), ++ PIN_CFG(NRTS6, SCU408, GENMASK(30, 28), (4 << 28))); ++FUNCFG_DESCL(AE25, PIN_CFG(PWM0, SCU40C, GENMASK(2, 0), 1), ++ PIN_CFG(SIOPBON0, SCU40C, GENMASK(2, 0), 2), ++ PIN_CFG(VPI, SCU40C, GENMASK(2, 0), 3), ++ PIN_CFG(SPIM0, SCU40C, GENMASK(2, 0), 4)); ++FUNCFG_DESCL(AD25, PIN_CFG(PWM1, SCU40C, GENMASK(6, 4), (1 << 4)), ++ PIN_CFG(SIOPBIN0, SCU40C, GENMASK(6, 4), (2 << 4)), ++ PIN_CFG(VPI, SCU40C, GENMASK(6, 4), (3 << 4)), ++ PIN_CFG(SPIM0, SCU40C, GENMASK(6, 4), (4 << 4))); ++FUNCFG_DESCL(AF23, PIN_CFG(PWM2, SCU40C, GENMASK(10, 8), (1 << 8)), ++ PIN_CFG(SIOSCIN0, SCU40C, GENMASK(10, 8), (2 << 8)), ++ PIN_CFG(VPI, SCU40C, GENMASK(10, 8), (3 << 8)), ++ PIN_CFG(SPIM0, SCU40C, GENMASK(10, 8), (4 << 8))); ++FUNCFG_DESCL(AF20, PIN_CFG(PWM3, SCU40C, GENMASK(14, 12), (1 << 12)), ++ PIN_CFG(SIOS3N0, SCU40C, GENMASK(14, 12), (2 << 12)), ++ PIN_CFG(VPI, SCU40C, GENMASK(14, 12), (3 << 12)), ++ PIN_CFG(SPIM0, SCU40C, GENMASK(14, 12), (4 << 12))); ++FUNCFG_DESCL(AF21, PIN_CFG(PWM4, SCU40C, GENMASK(18, 16), (1 << 16)), ++ PIN_CFG(SIOS5N0, SCU40C, GENMASK(18, 16), (2 << 16)), ++ PIN_CFG(VPI, SCU40C, GENMASK(18, 16), (3 << 16)), ++ PIN_CFG(SPIM0, SCU40C, GENMASK(18, 16), (4 << 16))); ++FUNCFG_DESCL(AE21, PIN_CFG(PWM5, SCU40C, GENMASK(22, 20), (1 << 20)), ++ PIN_CFG(SIOPWREQN0, SCU40C, GENMASK(22, 20), (2 << 20)), ++ PIN_CFG(VPI, SCU40C, GENMASK(22, 20), (3 << 20)), ++ PIN_CFG(SPIM0, SCU40C, GENMASK(22, 20), (4 << 20))); ++FUNCFG_DESCL(AE23, PIN_CFG(PWM6, SCU40C, GENMASK(26, 24), (1 << 24)), ++ PIN_CFG(SIOONCTRLN0, SCU40C, GENMASK(26, 24), (2 << 24)), ++ PIN_CFG(SPIM0, SCU40C, GENMASK(26, 24), (4 << 24))); ++FUNCFG_DESCL(AD22, PIN_CFG(PWM7, SCU40C, GENMASK(30, 28), (1 << 
28)), ++ PIN_CFG(SPIM0, SCU40C, GENMASK(30, 28), (4 << 28))); ++FUNCFG_DESCL(AF17, PIN_CFG(NCTS0, SCU410, GENMASK(2, 0), 1), ++ PIN_CFG(SIOPBON1, SCU410, GENMASK(2, 0), 2)); ++FUNCFG_DESCL(AA16, PIN_CFG(NDCD0, SCU410, GENMASK(6, 4), (1 << 4)), ++ PIN_CFG(SIOPBIN1, SCU410, GENMASK(6, 4), (2 << 4))); ++FUNCFG_DESCL(Y16, PIN_CFG(NDSR0, SCU410, GENMASK(10, 8), (1 << 8)), ++ PIN_CFG(SIOSCIN1, SCU410, GENMASK(10, 8), (2 << 8))); ++FUNCFG_DESCL(V17, PIN_CFG(NRI0, SCU410, GENMASK(14, 12), (1 << 12)), ++ PIN_CFG(SIOS3N1, SCU410, GENMASK(14, 12), (2 << 12))); ++FUNCFG_DESCL(J13, PIN_CFG(NDTR0, SCU410, GENMASK(18, 16), (1 << 16)), ++ PIN_CFG(SIOS5N1, SCU410, GENMASK(18, 16), (2 << 16))); ++FUNCFG_DESCL(AB16, PIN_CFG(NRTS0, SCU410, GENMASK(22, 20), (1 << 20)), ++ PIN_CFG(SIOPWREQN1, SCU410, GENMASK(22, 20), (2 << 20))); ++FUNCFG_DESCL(AC16, PIN_CFG(TXD0, SCU410, GENMASK(26, 24), (1 << 24))); ++FUNCFG_DESCL(AF16, PIN_CFG(RXD0, SCU410, GENMASK(30, 28), (1 << 28))); ++FUNCFG_DESCL(AA15, PIN_CFG(NCTS1, SCU414, GENMASK(2, 0), 1), ++ PIN_CFG(SIOONCTRLN1, SCU414, GENMASK(2, 0), 2)); ++FUNCFG_DESCL(AB15, PIN_CFG(NDCD1, SCU414, GENMASK(6, 4), (1 << 4)), ++ PIN_CFG(SIOPWRGD1, SCU414, GENMASK(6, 4), (2 << 4))); ++FUNCFG_DESCL(AC15, PIN_CFG(NDSR1, SCU414, GENMASK(10, 8), (1 << 8)), ++ PIN_CFG(SALT2, SCU414, GENMASK(10, 8), (2 << 8))); ++FUNCFG_DESCL(AD15, PIN_CFG(NRI1, SCU414, GENMASK(14, 12), (1 << 12)), ++ PIN_CFG(SALT3, SCU414, GENMASK(14, 12), (2 << 12))); ++FUNCFG_DESCL(Y15, PIN_CFG(NDTR1, SCU414, GENMASK(18, 16), (1 << 16))); ++FUNCFG_DESCL(AA14, PIN_CFG(NRTS1, SCU414, GENMASK(22, 20), (1 << 20))); ++FUNCFG_DESCL(W16, PIN_CFG(TXD1, SCU414, GENMASK(26, 24), (1 << 24))); ++FUNCFG_DESCL(V16, PIN_CFG(RXD1, SCU414, GENMASK(30, 28), (1 << 28))); ++FUNCFG_DESCL(AB18, PIN_CFG(TXD2, SCU418, GENMASK(2, 0), 1)); ++FUNCFG_DESCL(AC18, PIN_CFG(RXD2, SCU418, GENMASK(6, 4), (1 << 4)), ++ PIN_CFG(I2C12, SCU418, GENMASK(6, 4), (4 << 4))); ++FUNCFG_DESCL(K13, PIN_CFG(TXD3, SCU418, GENMASK(10, 8), (1 << 8)), ++ PIN_CFG(WDTRST0N, SCU418, GENMASK(10, 8), (2 << 8)), ++ PIN_CFG(PWM8, SCU418, GENMASK(10, 8), (3 << 8)), ++ PIN_CFG(SPIM1, SCU418, GENMASK(10, 8), (5 << 8))); ++FUNCFG_DESCL(AA17, PIN_CFG(RXD3, SCU418, GENMASK(14, 12), (1 << 12)), ++ PIN_CFG(WDTRST1N, SCU418, GENMASK(14, 12), (2 << 12)), ++ PIN_CFG(PWM9, SCU418, GENMASK(14, 12), (3 << 12)), ++ PIN_CFG(I2C12, SCU418, GENMASK(14, 12), (4 << 12)), ++ PIN_CFG(SPIM1, SCU418, GENMASK(14, 12), (5 << 12))); ++FUNCFG_DESCL(AB17, PIN_CFG(TXD5, SCU418, GENMASK(18, 16), (1 << 16)), ++ PIN_CFG(WDTRST2N, SCU418, GENMASK(18, 16), (2 << 16)), ++ PIN_CFG(PWM10, SCU418, GENMASK(18, 16), (3 << 16)), ++ PIN_CFG(I2C13, SCU418, GENMASK(18, 16), (4 << 16)), ++ PIN_CFG(SPIM1, SCU418, GENMASK(18, 16), (5 << 16))); ++FUNCFG_DESCL(AD16, PIN_CFG(RXD5, SCU418, GENMASK(22, 20), (1 << 20)), ++ PIN_CFG(WDTRST3N, SCU418, GENMASK(22, 20), (2 << 20)), ++ PIN_CFG(PWM11, SCU418, GENMASK(22, 20), (3 << 20)), ++ PIN_CFG(I2C13, SCU418, GENMASK(22, 20), (4 << 20)), ++ PIN_CFG(SPIM1, SCU418, GENMASK(22, 20), (5 << 20))); ++FUNCFG_DESCL(AC17, PIN_CFG(TXD6, SCU418, GENMASK(26, 24), (1 << 24)), ++ PIN_CFG(SALT0, SCU418, GENMASK(26, 24), (2 << 24)), ++ PIN_CFG(PWM12, SCU418, GENMASK(26, 24), (3 << 24)), ++ PIN_CFG(I2C14, SCU418, GENMASK(26, 24), (4 << 24)), ++ PIN_CFG(SPIM1, SCU418, GENMASK(26, 24), (5 << 24))); ++FUNCFG_DESCL(AD17, PIN_CFG(RXD6, SCU418, GENMASK(30, 28), (1 << 28)), ++ PIN_CFG(SALT1, SCU418, GENMASK(30, 28), (2 << 28)), ++ PIN_CFG(PWM13, SCU418, GENMASK(30, 28), (3 << 28)), ++ PIN_CFG(I2C14, 
SCU418, GENMASK(30, 28), (4 << 28)), ++ PIN_CFG(SPIM1, SCU418, GENMASK(30, 28), (5 << 28))); ++FUNCFG_DESCL(AE16, PIN_CFG(TXD7, SCU41C, GENMASK(2, 0), 1), ++ PIN_CFG(I2C15, SCU41C, GENMASK(2, 0), 2), ++ PIN_CFG(PWM14, SCU41C, GENMASK(2, 0), 3), ++ PIN_CFG(LPC1, SCU41C, GENMASK(2, 0), 4), ++ PIN_CFG(SPIM1, SCU41C, GENMASK(2, 0), 5)); ++FUNCFG_DESCL(AE17, PIN_CFG(RXD7, SCU41C, GENMASK(6, 4), (1 << 4)), ++ PIN_CFG(I2C15, SCU41C, GENMASK(6, 4), (2 << 4)), ++ PIN_CFG(PWM15, SCU41C, GENMASK(6, 4), (3 << 4)), ++ PIN_CFG(LPC1, SCU41C, GENMASK(6, 4), (4 << 4)), ++ PIN_CFG(SPIM1, SCU41C, GENMASK(6, 4), (5 << 4))); ++FUNCFG_DESCL(AB24, PIN_CFG(SGPM1, SCU41C, GENMASK(10, 8), (1 << 8)), ++ PIN_CFG(WDTRST7N, SCU41C, GENMASK(10, 8), (2 << 8)), ++ PIN_CFG(PESGWAKEN, SCU41C, GENMASK(10, 8), (3 << 8)), ++ PIN_CFG(SMON1, SCU41C, GENMASK(10, 8), (5 << 8))); ++FUNCFG_DESCL(W26, PIN_CFG(SGPM1, SCU41C, GENMASK(14, 12), (1 << 12)), ++ PIN_CFG(SMON1, SCU41C, GENMASK(14, 12), (5 << 12))); ++FUNCFG_DESCL(HOLE0); ++FUNCFG_DESCL(HOLE1); ++FUNCFG_DESCL(HOLE2); ++FUNCFG_DESCL(HOLE3); ++FUNCFG_DESCL(W25, PIN_CFG(HVI3C12, SCU420, GENMASK(2, 0), 1), ++ PIN_CFG(DI2C12, SCU420, GENMASK(2, 0), 2)); ++FUNCFG_DESCL(Y23, PIN_CFG(HVI3C12, SCU420, GENMASK(6, 4), (1 << 4)), ++ PIN_CFG(DI2C12, SCU420, GENMASK(6, 4), (2 << 4))); ++FUNCFG_DESCL(Y24, PIN_CFG(HVI3C13, SCU420, GENMASK(10, 8), (1 << 8)), ++ PIN_CFG(DI2C13, SCU420, GENMASK(10, 8), (2 << 8))); ++FUNCFG_DESCL(W21, PIN_CFG(HVI3C13, SCU420, GENMASK(14, 12), (1 << 12)), ++ PIN_CFG(DI2C13, SCU420, GENMASK(14, 12), (2 << 12))); ++FUNCFG_DESCL(AA23, PIN_CFG(HVI3C14, SCU420, GENMASK(18, 16), (1 << 16)), ++ PIN_CFG(DI2C14, SCU420, GENMASK(18, 16), (2 << 16))); ++FUNCFG_DESCL(AC22, PIN_CFG(HVI3C14, SCU420, GENMASK(22, 20), (1 << 20)), ++ PIN_CFG(DI2C14, SCU420, GENMASK(22, 20), (2 << 20))); ++FUNCFG_DESCL(AB22, PIN_CFG(HVI3C15, SCU420, GENMASK(26, 24), (1 << 24)), ++ PIN_CFG(DI2C15, SCU420, GENMASK(26, 24), (2 << 24))); ++FUNCFG_DESCL(Y21, PIN_CFG(HVI3C15, SCU420, GENMASK(30, 28), (1 << 28)), ++ PIN_CFG(DI2C15, SCU420, GENMASK(30, 28), (2 << 28))); ++FUNCFG_DESCL(AE20, PIN_CFG(I3C4, SCU424, GENMASK(2, 0), 1)); ++FUNCFG_DESCL(AF19, PIN_CFG(I3C4, SCU424, GENMASK(6, 4), (1 << 4))); ++FUNCFG_DESCL(Y22, PIN_CFG(I3C5, SCU424, GENMASK(10, 8), (1 << 8))); ++FUNCFG_DESCL(AA20, PIN_CFG(I3C5, SCU424, GENMASK(14, 12), (1 << 12))); ++FUNCFG_DESCL(AA22, PIN_CFG(I3C6, SCU424, GENMASK(18, 16), (1 << 16))); ++FUNCFG_DESCL(AB20, PIN_CFG(I3C6, SCU424, GENMASK(22, 20), (1 << 20))); ++FUNCFG_DESCL(AF18, PIN_CFG(I3C7, SCU424, GENMASK(26, 24), (1 << 24))); ++FUNCFG_DESCL(AE19, PIN_CFG(I3C7, SCU424, GENMASK(30, 28), (1 << 28))); ++FUNCFG_DESCL(AD20, PIN_CFG(I3C8, SCU428, GENMASK(2, 0), 1), ++ PIN_CFG(FSI0, SCU428, GENMASK(2, 0), 2)); ++FUNCFG_DESCL(AC20, PIN_CFG(I3C8, SCU428, GENMASK(6, 4), (1 << 4)), ++ PIN_CFG(FSI0, SCU428, GENMASK(6, 4), (2 << 4))); ++FUNCFG_DESCL(AA21, PIN_CFG(I3C9, SCU428, GENMASK(10, 8), (1 << 8)), ++ PIN_CFG(FSI1, SCU428, GENMASK(10, 8), (2 << 8))); ++FUNCFG_DESCL(AB21, PIN_CFG(I3C9, SCU428, GENMASK(14, 12), (1 << 12)), ++ PIN_CFG(FSI1, SCU428, GENMASK(14, 12), (2 << 12))); ++FUNCFG_DESCL(AC19, PIN_CFG(I3C10, SCU428, GENMASK(18, 16), (1 << 16)), ++ PIN_CFG(FSI2, SCU428, GENMASK(18, 16), (2 << 16))); ++FUNCFG_DESCL(AE18, PIN_CFG(I3C10, SCU428, GENMASK(22, 20), (1 << 20)), ++ PIN_CFG(FSI2, SCU428, GENMASK(22, 20), (2 << 20))); ++FUNCFG_DESCL(AD19, PIN_CFG(I3C11, SCU428, GENMASK(26, 24), (1 << 24)), ++ PIN_CFG(FSI3, SCU428, GENMASK(26, 24), (2 << 24))); ++FUNCFG_DESCL(AD18, 
PIN_CFG(I3C11, SCU428, GENMASK(30, 28), (1 << 28)), ++ PIN_CFG(FSI3, SCU428, GENMASK(30, 28), (2 << 28))); ++FUNCFG_DESCL(U25, PIN_CFG(HVI3C0, SCU42C, GENMASK(2, 0), 1), ++ PIN_CFG(DI2C8, SCU42C, GENMASK(2, 0), 2)); ++FUNCFG_DESCL(U26, PIN_CFG(HVI3C0, SCU42C, GENMASK(6, 4), (1 << 4)), ++ PIN_CFG(DI2C8, SCU42C, GENMASK(6, 4), (2 << 4))); ++FUNCFG_DESCL(Y26, PIN_CFG(HVI3C1, SCU42C, GENMASK(10, 8), (1 << 8)), ++ PIN_CFG(DI2C9, SCU42C, GENMASK(10, 8), (2 << 8))); ++FUNCFG_DESCL(AA24, PIN_CFG(HVI3C1, SCU42C, GENMASK(14, 12), (1 << 12)), ++ PIN_CFG(DI2C9, SCU42C, GENMASK(14, 12), (2 << 12))); ++FUNCFG_DESCL(R25, PIN_CFG(HVI3C2, SCU42C, GENMASK(18, 16), (1 << 16)), ++ PIN_CFG(DI2C10, SCU42C, GENMASK(18, 16), (2 << 16))); ++FUNCFG_DESCL(AA26, PIN_CFG(HVI3C2, SCU42C, GENMASK(22, 20), (1 << 20)), ++ PIN_CFG(DI2C10, SCU42C, GENMASK(22, 20), (2 << 20))); ++FUNCFG_DESCL(R26, PIN_CFG(HVI3C3, SCU42C, GENMASK(26, 24), (1 << 24)), ++ PIN_CFG(DI2C11, SCU42C, GENMASK(26, 24), (2 << 24))); ++FUNCFG_DESCL(Y25, PIN_CFG(HVI3C3, SCU42C, GENMASK(30, 28), (1 << 28)), ++ PIN_CFG(DI2C11, SCU42C, GENMASK(30, 28), (2 << 28))); ++FUNCFG_DESCL(B16, PIN_CFG(ESPI0, SCU430, GENMASK(2, 0), 1), ++ PIN_CFG(LPC0, SCU430, GENMASK(2, 0), 2)); ++FUNCFG_DESCL(D14, PIN_CFG(ESPI0, SCU430, GENMASK(6, 4), (1 << 4)), ++ PIN_CFG(LPC0, SCU430, GENMASK(6, 4), (2 << 4))); ++FUNCFG_DESCL(B15, PIN_CFG(ESPI0, SCU430, GENMASK(10, 8), (1 << 8)), ++ PIN_CFG(LPC0, SCU430, GENMASK(10, 8), (2 << 8))); ++FUNCFG_DESCL(B14, PIN_CFG(ESPI0, SCU430, GENMASK(14, 12), (1 << 12)), ++ PIN_CFG(LPC0, SCU430, GENMASK(14, 12), (2 << 12))); ++FUNCFG_DESCL(C17, PIN_CFG(ESPI0, SCU430, GENMASK(18, 16), (1 << 16)), ++ PIN_CFG(LPC0, SCU430, GENMASK(18, 16), (2 << 16)), ++ PIN_CFG(OSCCLK, SCU430, GENMASK(18, 16), (3 << 16))); ++FUNCFG_DESCL(B13, PIN_CFG(ESPI0, SCU430, GENMASK(22, 20), (1 << 20)), ++ PIN_CFG(LPC0, SCU430, GENMASK(22, 20), (2 << 20))); ++FUNCFG_DESCL(E14, PIN_CFG(ESPI0, SCU430, GENMASK(26, 24), (1 << 24)), ++ PIN_CFG(LPC0, SCU430, GENMASK(26, 24), (2 << 24))); ++FUNCFG_DESCL(C15, PIN_CFG(ESPI0, SCU430, GENMASK(30, 28), (1 << 28)), ++ PIN_CFG(LPC0, SCU430, GENMASK(30, 28), (2 << 28))); ++FUNCFG_DESCL(D24, PIN_CFG(SPI0, SCU434, GENMASK(2, 0), 1)); ++FUNCFG_DESCL(B23, PIN_CFG(SPI0, SCU434, GENMASK(6, 4), (1 << 4))); ++FUNCFG_DESCL(B22, PIN_CFG(SPI0, SCU434, GENMASK(10, 8), (1 << 8))); ++FUNCFG_DESCL(C23, PIN_CFG(QSPI0, SCU434, GENMASK(14, 12), (1 << 12))); ++FUNCFG_DESCL(B18, PIN_CFG(QSPI0, SCU434, GENMASK(18, 16), (1 << 16))); ++FUNCFG_DESCL(B21, PIN_CFG(SPI0CS1, SCU434, GENMASK(22, 20), (1 << 20))); ++FUNCFG_DESCL(M15, PIN_CFG(SPI0ABR, SCU434, GENMASK(26, 24), (1 << 24)), ++ PIN_CFG(TXD8, SCU434, GENMASK(26, 24), (3 << 24))); ++FUNCFG_DESCL(B19, PIN_CFG(SPI0WPN, SCU434, GENMASK(30, 28), (1 << 28)), ++ PIN_CFG(RXD8, SCU434, GENMASK(30, 28), (3 << 28))); ++FUNCFG_DESCL(B26, PIN_CFG(SPI1, SCU438, GENMASK(2, 0), 1), ++ PIN_CFG(TXD9, SCU438, GENMASK(2, 0), 3)); ++FUNCFG_DESCL(A25, PIN_CFG(SPI1, SCU438, GENMASK(6, 4), (1 << 4)), ++ PIN_CFG(RXD9, SCU438, GENMASK(6, 4), (3 << 4))); ++FUNCFG_DESCL(A24, PIN_CFG(SPI1, SCU438, GENMASK(10, 8), (1 << 8)), ++ PIN_CFG(TXD10, SCU438, GENMASK(10, 8), (3 << 8))); ++FUNCFG_DESCL(B24, PIN_CFG(QSPI1, SCU438, GENMASK(14, 12), (1 << 12)), ++ PIN_CFG(RXD10, SCU438, GENMASK(14, 12), (3 << 12))); ++FUNCFG_DESCL(E26, PIN_CFG(QSPI1, SCU438, GENMASK(18, 16), (1 << 16)), ++ PIN_CFG(TXD11, SCU438, GENMASK(18, 16), (3 << 16))); ++FUNCFG_DESCL(A21, PIN_CFG(SPI1CS1, SCU438, GENMASK(22, 20), (1 << 20)), ++ PIN_CFG(RXD11, SCU438, GENMASK(22, 
20), (3 << 20))); ++FUNCFG_DESCL(A19, PIN_CFG(SPI1ABR, SCU438, GENMASK(26, 24), (1 << 24)), ++ PIN_CFG(THRU2, SCU438, GENMASK(26, 24), (4 << 24))); ++FUNCFG_DESCL(A18, PIN_CFG(SPI1WPN, SCU438, GENMASK(30, 28), (1 << 28)), ++ PIN_CFG(THRU2, SCU438, GENMASK(30, 28), (4 << 28))); ++FUNCFG_DESCL(D26, PIN_CFG(SPI2, SCU43C, GENMASK(2, 0), 1)); ++FUNCFG_DESCL(C26, PIN_CFG(SPI2, SCU43C, GENMASK(6, 4), (1 << 4))); ++FUNCFG_DESCL(A23, PIN_CFG(SPI2, SCU43C, GENMASK(10, 8), (1 << 8))); ++FUNCFG_DESCL(A22, PIN_CFG(SPI2, SCU43C, GENMASK(14, 12), (1 << 12))); ++FUNCFG_DESCL(B25, PIN_CFG(QSPI2, SCU43C, GENMASK(18, 16), (1 << 16)), ++ PIN_CFG(THRU3, SCU43C, GENMASK(18, 16), (4 << 16))); ++FUNCFG_DESCL(F26, PIN_CFG(QSPI2, SCU43C, GENMASK(22, 20), (1 << 20)), ++ PIN_CFG(THRU3, SCU43C, GENMASK(22, 20), (4 << 20))); ++FUNCFG_DESCL(A26, PIN_CFG(SPI2CS1, SCU43C, GENMASK(26, 24), (1 << 24))); ++FUNCFG_DESCL(A14, PIN_CFG(FWSPIABR, SCU43C, GENMASK(30, 28), (1 << 28))); ++FUNCFG_DESCL(E10, PIN_CFG(MDIO2, SCU440, GENMASK(2, 0), 1), ++ PIN_CFG(PE2SGRSTN, SCU440, GENMASK(2, 0), 2)); ++FUNCFG_DESCL(E13, PIN_CFG(MDIO2, SCU440, GENMASK(6, 4), (1 << 4))); ++FUNCFG_DESCL(D12, PIN_CFG(JTAGM1, SCU440, GENMASK(10, 8), (1 << 8))); ++FUNCFG_DESCL(F10, PIN_CFG(JTAGM1, SCU440, GENMASK(14, 12), (1 << 12))); ++FUNCFG_DESCL(E11, PIN_CFG(JTAGM1, SCU440, GENMASK(18, 16), (1 << 16))); ++FUNCFG_DESCL(F11, PIN_CFG(JTAGM1, SCU440, GENMASK(22, 20), (1 << 20))); ++FUNCFG_DESCL(F13, PIN_CFG(JTAGM1, SCU440, GENMASK(26, 24), (1 << 24))); ++FUNCFG_DESCL(N15, PIN_CFG(FWSPIWPEN, SCU440, GENMASK(30, 28), (1 << 28))); ++FUNCFG_DESCL(C20, PIN_CFG(RGMII0, SCU444, GENMASK(2, 0), 1), ++ PIN_CFG(RMII0, SCU444, GENMASK(2, 0), 2)); ++FUNCFG_DESCL(C19, PIN_CFG(RGMII0, SCU444, GENMASK(6, 4), (1 << 4))); ++FUNCFG_DESCL(A8, PIN_CFG(RGMII0, SCU444, GENMASK(10, 8), (1 << 8)), ++ PIN_CFG(RMII0, SCU444, GENMASK(10, 8), (2 << 8))); ++FUNCFG_DESCL(R14, PIN_CFG(RGMII0, SCU444, GENMASK(14, 12), (1 << 12)), ++ PIN_CFG(RMII0, SCU444, GENMASK(14, 12), (2 << 12))); ++FUNCFG_DESCL(A7, PIN_CFG(RGMII0, SCU444, GENMASK(18, 16), (1 << 16)), ++ PIN_CFG(RMII0, SCU444, GENMASK(18, 16), (2 << 16))); ++FUNCFG_DESCL(P14, PIN_CFG(RGMII0, SCU444, GENMASK(22, 20), (1 << 20)), ++ PIN_CFG(RMII0, SCU444, GENMASK(22, 20), (2 << 20))); ++FUNCFG_DESCL(D20, PIN_CFG(RGMII0, SCU444, GENMASK(26, 24), (1 << 24)), ++ PIN_CFG(RMII0RCLKO, SCU444, GENMASK(26, 24), (2 << 24))); ++FUNCFG_DESCL(A6, PIN_CFG(RGMII0, SCU444, GENMASK(30, 28), (1 << 28)), ++ PIN_CFG(RMII0, SCU444, GENMASK(30, 28), (2 << 28))); ++FUNCFG_DESCL(B6, PIN_CFG(RGMII0, SCU448, GENMASK(2, 0), 1), ++ PIN_CFG(RMII0, SCU448, GENMASK(2, 0), 2)); ++FUNCFG_DESCL(N14, PIN_CFG(RGMII0, SCU448, GENMASK(6, 4), (1 << 4)), ++ PIN_CFG(RMII0, SCU448, GENMASK(6, 4), (2 << 4))); ++FUNCFG_DESCL(B7, PIN_CFG(RGMII0, SCU448, GENMASK(10, 8), (1 << 8))); ++FUNCFG_DESCL(B8, PIN_CFG(RGMII0, SCU448, GENMASK(14, 12), (1 << 12))); ++FUNCFG_DESCL(B9, PIN_CFG(MDIO0, SCU448, GENMASK(18, 16), (1 << 16))); ++FUNCFG_DESCL(M14, PIN_CFG(MDIO0, SCU448, GENMASK(22, 20), (1 << 20))); ++FUNCFG_DESCL(J11, PIN_CFG(VGA, SCU448, GENMASK(26, 24), (1 << 24))); ++FUNCFG_DESCL(E7, PIN_CFG(VGA, SCU448, GENMASK(30, 28), (1 << 28))); ++FUNCFG_DESCL(D19, PIN_CFG(RGMII1, SCU44C, GENMASK(2, 0), 1), ++ PIN_CFG(RMII1, SCU44C, GENMASK(2, 0), 2), ++ PIN_CFG(DSGPM0, SCU44C, GENMASK(2, 0), 4)); ++FUNCFG_DESCL(B11, PIN_CFG(RGMII1, SCU44C, GENMASK(6, 4), (1 << 4)), ++ PIN_CFG(SGPS, SCU44C, GENMASK(6, 4), (5 << 4))); ++FUNCFG_DESCL(D15, PIN_CFG(RGMII1, SCU44C, GENMASK(10, 8), (1 << 8)), ++ 
PIN_CFG(RMII1, SCU44C, GENMASK(10, 8), (2 << 8)), ++ PIN_CFG(TXD3, SCU44C, GENMASK(10, 8), (4 << 8))); ++FUNCFG_DESCL(B12, PIN_CFG(RGMII1, SCU44C, GENMASK(14, 12), (1 << 12)), ++ PIN_CFG(RMII1, SCU44C, GENMASK(14, 12), (2 << 12)), ++ PIN_CFG(RXD3, SCU44C, GENMASK(14, 12), (4 << 12))); ++FUNCFG_DESCL(B10, PIN_CFG(RGMII1, SCU44C, GENMASK(18, 16), (1 << 16)), ++ PIN_CFG(RMII1, SCU44C, GENMASK(18, 16), (2 << 16)), ++ PIN_CFG(DSGPM0, SCU44C, GENMASK(18, 16), (4 << 16))); ++FUNCFG_DESCL(P13, PIN_CFG(RGMII1, SCU44C, GENMASK(22, 20), (1 << 20)), ++ PIN_CFG(RMII1, SCU44C, GENMASK(22, 20), (2 << 20))); ++FUNCFG_DESCL(C18, PIN_CFG(RGMII1, SCU44C, GENMASK(26, 24), (1 << 24)), ++ PIN_CFG(RMII1RCLKO, SCU44C, GENMASK(26, 24), (2 << 24)), ++ PIN_CFG(SGPS, SCU44C, GENMASK(26, 24), (5 << 24))); ++FUNCFG_DESCL(C6, PIN_CFG(RGMII1, SCU44C, GENMASK(30, 28), (1 << 28)), ++ PIN_CFG(RMII1, SCU44C, GENMASK(30, 28), (2 << 28))); ++FUNCFG_DESCL(C7, PIN_CFG(RGMII1, SCU450, GENMASK(2, 0), 1), ++ PIN_CFG(RMII1, SCU450, GENMASK(2, 0), 2), ++ PIN_CFG(DSGPM0, SCU450, GENMASK(2, 0), 4)); ++FUNCFG_DESCL(D7, PIN_CFG(RGMII1, SCU450, GENMASK(6, 4), (1 << 4)), ++ PIN_CFG(RMII1, SCU450, GENMASK(6, 4), (2 << 4)), ++ PIN_CFG(DSGPM0, SCU450, GENMASK(6, 4), (4 << 4))); ++FUNCFG_DESCL(N13, PIN_CFG(RGMII1, SCU450, GENMASK(10, 8), (1 << 8)), ++ PIN_CFG(SGPS, SCU450, GENMASK(10, 8), (5 << 8))); ++FUNCFG_DESCL(C8, PIN_CFG(RGMII1, SCU450, GENMASK(14, 12), (1 << 12)), ++ PIN_CFG(SGPS, SCU450, GENMASK(14, 12), (5 << 12))); ++FUNCFG_DESCL(C9, PIN_CFG(MDIO1, SCU450, GENMASK(18, 16), (1 << 16))); ++FUNCFG_DESCL(C10, PIN_CFG(MDIO1, SCU450, GENMASK(22, 20), (1 << 20))); ++FUNCFG_DESCL(M16, PIN_CFG(FWQSPI, SCU450, GENMASK(26, 24), (1 << 24))); ++FUNCFG_DESCL(A15, PIN_CFG(FWQSPI, SCU450, GENMASK(30, 28), (1 << 28))); ++FUNCFG_DESCL(G11, PIN_CFG(I2C0, SCU454, GENMASK(2, 0), 1), ++ PIN_CFG(LTPI_PS_I2C0, SCU454, GENMASK(2, 0), 2)); ++FUNCFG_DESCL(H7, PIN_CFG(I2C0, SCU454, GENMASK(6, 4), (1 << 4)), ++ PIN_CFG(LTPI_PS_I2C0, SCU454, GENMASK(6, 4), (2 << 4))); ++FUNCFG_DESCL(H8, PIN_CFG(I2C1, SCU454, GENMASK(10, 8), (1 << 8)), ++ PIN_CFG(LTPI_PS_I2C1, SCU454, GENMASK(10, 8), (2 << 8))); ++FUNCFG_DESCL(H9, PIN_CFG(I2C1, SCU454, GENMASK(14, 12), (1 << 12)), ++ PIN_CFG(LTPI_PS_I2C1, SCU454, GENMASK(14, 12), (2 << 12))); ++FUNCFG_DESCL(H10, PIN_CFG(I2C2, SCU454, GENMASK(18, 16), (1 << 16)), ++ PIN_CFG(LTPI_PS_I2C2, SCU454, GENMASK(18, 16), (2 << 16))); ++FUNCFG_DESCL(H11, PIN_CFG(I2C2, SCU454, GENMASK(22, 20), (1 << 20)), ++ PIN_CFG(LTPI_PS_I2C2, SCU454, GENMASK(22, 20), (2 << 20))); ++FUNCFG_DESCL(J9, PIN_CFG(I2C3, SCU454, GENMASK(26, 24), (1 << 24)), ++ PIN_CFG(LTPI_PS_I2C3, SCU454, GENMASK(26, 24), (2 << 24))); ++FUNCFG_DESCL(J10, PIN_CFG(I2C3, SCU454, GENMASK(30, 28), (1 << 28)), ++ PIN_CFG(LTPI_PS_I2C3, SCU454, GENMASK(30, 28), (2 << 28))); ++FUNCFG_DESCL(E9, PIN_CFG(I2C4, SCU458, GENMASK(2, 0), 1), ++ PIN_CFG(I2CF1, SCU458, GENMASK(2, 0), 5)); ++FUNCFG_DESCL(F9, PIN_CFG(I2C4, SCU458, GENMASK(6, 4), (1 << 4)), ++ PIN_CFG(I2CF1, SCU458, GENMASK(6, 4), (5 << 4))); ++FUNCFG_DESCL(F8, PIN_CFG(I2C5, SCU458, GENMASK(10, 8), (1 << 8)), ++ PIN_CFG(I2CF1, SCU458, GENMASK(10, 8), (5 << 8))); ++FUNCFG_DESCL(M13, PIN_CFG(I2C5, SCU458, GENMASK(14, 12), (1 << 12)), ++ PIN_CFG(I2CF1, SCU458, GENMASK(14, 12), (5 << 12))); ++FUNCFG_DESCL(F7, PIN_CFG(I2C6, SCU458, GENMASK(18, 16), (1 << 16)), ++ PIN_CFG(I2CF2, SCU458, GENMASK(18, 16), (5 << 16))); ++FUNCFG_DESCL(D8, PIN_CFG(I2C6, SCU458, GENMASK(22, 20), (1 << 20)), ++ PIN_CFG(I2CF2, SCU458, GENMASK(22, 20), (5 << 20))); 
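++/*
++ * The same encoding continues below. Note that the ADC bank (balls W17 .. AB19,
++ * SCU460/SCU464) uses a field value of 0 for the analogue input, while the
++ * GPIY/GPIZ, SALT and AUXPWRGOOD alternates use non-zero select values.
++ */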
++FUNCFG_DESCL(E8, PIN_CFG(I2C7, SCU458, GENMASK(26, 24), (1 << 24)), ++ PIN_CFG(I2CF2, SCU458, GENMASK(26, 24), (5 << 24))); ++FUNCFG_DESCL(L12, PIN_CFG(I2C7, SCU458, GENMASK(30, 28), (1 << 28)), ++ PIN_CFG(I2CF2, SCU458, GENMASK(30, 28), (5 << 28))); ++FUNCFG_DESCL(F12, PIN_CFG(I2C8, SCU45C, GENMASK(2, 0), 1), ++ PIN_CFG(I2CF0, SCU45C, GENMASK(2, 0), 5)); ++FUNCFG_DESCL(E12, PIN_CFG(I2C8, SCU45C, GENMASK(6, 4), (1 << 4)), ++ PIN_CFG(I2CF0, SCU45C, GENMASK(6, 4), (5 << 4))); ++FUNCFG_DESCL(J12, PIN_CFG(I2C9, SCU45C, GENMASK(10, 8), (1 << 8)), ++ PIN_CFG(I2CF0, SCU45C, GENMASK(10, 8), (5 << 8))); ++FUNCFG_DESCL(G7, PIN_CFG(I2C9, SCU45C, GENMASK(14, 12), (1 << 12)), ++ PIN_CFG(CANBUS, SCU45C, GENMASK(14, 12), (2 << 12)), ++ PIN_CFG(I2CF0, SCU45C, GENMASK(14, 12), (5 << 12))); ++FUNCFG_DESCL(G8, PIN_CFG(I2C10, SCU45C, GENMASK(18, 16), (1 << 16)), ++ PIN_CFG(CANBUS, SCU45C, GENMASK(18, 16), (2 << 16))); ++FUNCFG_DESCL(G9, PIN_CFG(I2C10, SCU45C, GENMASK(22, 20), (1 << 20)), ++ PIN_CFG(CANBUS, SCU45C, GENMASK(22, 20), (2 << 20))); ++FUNCFG_DESCL(G10, PIN_CFG(I2C11, SCU45C, GENMASK(26, 24), (1 << 24)), ++ PIN_CFG(USBUART, SCU45C, GENMASK(26, 24), (2 << 24))); ++FUNCFG_DESCL(K12, PIN_CFG(I2C11, SCU45C, GENMASK(30, 28), (1 << 28)), ++ PIN_CFG(USBUART, SCU45C, GENMASK(30, 28), (2 << 28))); ++FUNCFG_DESCL(W17, PIN_CFG(ADC0, SCU460, GENMASK(2, 0), 0), ++ PIN_CFG(GPIY0, SCU460, GENMASK(2, 0), 1), ++ PIN_CFG(SALT4, SCU460, GENMASK(2, 0), 2)); ++FUNCFG_DESCL(V18, PIN_CFG(ADC1, SCU460, GENMASK(6, 4), 0), ++ PIN_CFG(GPIY1, SCU460, GENMASK(6, 4), (1 << 4)), ++ PIN_CFG(SALT5, SCU460, GENMASK(6, 4), (2 << 4))); ++FUNCFG_DESCL(W18, PIN_CFG(ADC2, SCU460, GENMASK(10, 8), 0), ++ PIN_CFG(GPIY2, SCU460, GENMASK(10, 8), (1 << 8)), ++ PIN_CFG(SALT6, SCU460, GENMASK(10, 8), (2 << 8))); ++FUNCFG_DESCL(Y17, PIN_CFG(ADC3, SCU460, GENMASK(14, 12), 0), ++ PIN_CFG(GPIY3, SCU460, GENMASK(14, 12), (1 << 12)), ++ PIN_CFG(SALT7, SCU460, GENMASK(14, 12), (2 << 12))); ++FUNCFG_DESCL(AA18, PIN_CFG(ADC4, SCU460, GENMASK(18, 16), 0), ++ PIN_CFG(GPIY4, SCU460, GENMASK(18, 16), (1 << 16)), ++ PIN_CFG(SALT8, SCU460, GENMASK(18, 16), (2 << 16))); ++FUNCFG_DESCL(AA13, PIN_CFG(ADC5, SCU460, GENMASK(22, 20), 0), ++ PIN_CFG(GPIY5, SCU460, GENMASK(22, 20), (1 << 20)), ++ PIN_CFG(SALT9, SCU460, GENMASK(22, 20), (2 << 20))); ++FUNCFG_DESCL(Y18, PIN_CFG(ADC6, SCU460, GENMASK(26, 24), 0), ++ PIN_CFG(GPIY6, SCU460, GENMASK(26, 24), (1 << 24)), ++ PIN_CFG(SALT10, SCU460, GENMASK(26, 24), (2 << 24))); ++FUNCFG_DESCL(AA12, PIN_CFG(ADC7, SCU460, GENMASK(30, 28), 0), ++ PIN_CFG(GPIY7, SCU460, GENMASK(30, 28), (1 << 28)), ++ PIN_CFG(SALT11, SCU460, GENMASK(30, 28), (2 << 28))); ++FUNCFG_DESCL(W20, PIN_CFG(ADC8, SCU464, GENMASK(2, 0), 0), ++ PIN_CFG(GPIZ0, SCU464, GENMASK(2, 0), 1)); ++FUNCFG_DESCL(V20, PIN_CFG(ADC9, SCU464, GENMASK(6, 4), 0), ++ PIN_CFG(GPIZ1, SCU464, GENMASK(6, 4), (1 << 4))); ++FUNCFG_DESCL(Y11, PIN_CFG(ADC10, SCU464, GENMASK(10, 8), 0), ++ PIN_CFG(GPIZ2, SCU464, GENMASK(10, 8), (1 << 8))); ++FUNCFG_DESCL(V14, PIN_CFG(ADC11, SCU464, GENMASK(14, 12), 0), ++ PIN_CFG(GPIZ3, SCU464, GENMASK(14, 12), (1 << 12))); ++FUNCFG_DESCL(V19, PIN_CFG(ADC12, SCU464, GENMASK(18, 16), 0), ++ PIN_CFG(GPIZ4, SCU464, GENMASK(18, 16), (1 << 16))); ++FUNCFG_DESCL(W14, PIN_CFG(ADC13, SCU464, GENMASK(22, 20), 0), ++ PIN_CFG(GPIZ5, SCU464, GENMASK(22, 20), (1 << 20)), ++ PIN_CFG(AUXPWRGOOD0, SCU464, GENMASK(22, 20), (2 << 20))); ++FUNCFG_DESCL(Y20, PIN_CFG(ADC14, SCU464, GENMASK(26, 24), 0), ++ PIN_CFG(GPIZ6, SCU464, GENMASK(26, 24), (1 << 24)), ++ 
PIN_CFG(AUXPWRGOOD1, SCU464, GENMASK(26, 24), (2 << 24))); ++FUNCFG_DESCL(AB19, PIN_CFG(ADC15, SCU464, GENMASK(30, 28), 0), ++ PIN_CFG(GPIZ7, SCU464, GENMASK(30, 28), (1 << 28))); ++FUNCFG_DESCL(U21, PIN_CFG(SGPM0, SCU468, GENMASK(2, 0), 1), ++ PIN_CFG(SMON0, SCU468, GENMASK(2, 0), 2), ++ PIN_CFG(NCTS2, SCU468, GENMASK(2, 0), 3), ++ PIN_CFG(MACLINK0, SCU468, GENMASK(2, 0), 4)); ++FUNCFG_DESCL(T24, PIN_CFG(SGPM0, SCU468, GENMASK(6, 4), (1 << 4)), ++ PIN_CFG(SMON0, SCU468, GENMASK(6, 4), (2 << 4)), ++ PIN_CFG(NDCD2, SCU468, GENMASK(6, 4), (3 << 4)), ++ PIN_CFG(MACLINK2, SCU468, GENMASK(6, 4), (4 << 4))); ++FUNCFG_DESCL(V24, PIN_CFG(SGPM0LD_R, SCU468, GENMASK(10, 8), (2 << 8)), ++ PIN_CFG(HBLED, SCU468, GENMASK(10, 8), (2 << 8))); ++FUNCFG_DESCL(V22, PIN_CFG(SGPM0, SCU468, GENMASK(14, 12), (1 << 12)), ++ PIN_CFG(SMON0, SCU468, GENMASK(14, 12), (2 << 12)), ++ PIN_CFG(NDSR2, SCU468, GENMASK(14, 12), (3 << 12))); ++FUNCFG_DESCL(T23, PIN_CFG(SGPM0, SCU468, GENMASK(18, 16), (1 << 16)), ++ PIN_CFG(SMON0, SCU468, GENMASK(18, 16), (2 << 16)), ++ PIN_CFG(NRI2, SCU468, GENMASK(18, 16), (3 << 16))); ++FUNCFG_DESCL(AC25, PIN_CFG(SGPM1, SCU468, GENMASK(22, 20), (1 << 20)), ++ PIN_CFG(WDTRST4N, SCU468, GENMASK(22, 20), (2 << 20)), ++ PIN_CFG(NDTR2, SCU468, GENMASK(22, 20), (3 << 20)), ++ PIN_CFG(SMON1, SCU468, GENMASK(22, 20), (4 << 20))); ++FUNCFG_DESCL(AB25, PIN_CFG(SGPM1, SCU468, GENMASK(26, 24), (1 << 24)), ++ PIN_CFG(WDTRST5N, SCU468, GENMASK(26, 24), (2 << 24)), ++ PIN_CFG(NRTS2, SCU468, GENMASK(26, 24), (3 << 24)), ++ PIN_CFG(SMON1, SCU468, GENMASK(26, 24), (4 << 24))); ++FUNCFG_DESCL(AC24, PIN_CFG(SGPM1LD_R, SCU468, GENMASK(30, 28), (1 << 28)), ++ PIN_CFG(WDTRST6N, SCU468, GENMASK(30, 28), (2 << 28)), ++ PIN_CFG(MACLINK1, SCU468, GENMASK(30, 28), (3 << 28))); ++FUNCFG_DESCL(SGMII0, PIN_CFG(SGMII, SCU47C, BIT(0), 1 << 0)); ++FUNCFG_DESCL(PCIERC2_PERST, PIN_CFG(PE2SGRSTN, SCU908, BIT(1), 1 << 1)); ++FUNCFG_DESCL(PORTC_MODE, PIN_CFG(USB2CUD, SCU3B0, GENMASK(1, 0), 0), ++ PIN_CFG(USB2CD, SCU3B0, GENMASK(1, 0), 1 << 0), ++ PIN_CFG(USB2CH, SCU3B0, GENMASK(1, 0), 2 << 0), ++ PIN_CFG(USB2CU, SCU3B0, GENMASK(1, 0), 3 << 0)); ++FUNCFG_DESCL(PORTD_MODE, PIN_CFG(USB2DD, SCU3B0, GENMASK(3, 2), 1 << 2), ++ PIN_CFG(USB2DH, SCU3B0, GENMASK(3, 2), 2 << 2)); ++ ++static const struct aspeed_g7_pincfg pin_cfg[] = { ++ PINCFG_PIN(C16), PINCFG_PIN(C14), PINCFG_PIN(C11), ++ PINCFG_PIN(D9), PINCFG_PIN(F14), PINCFG_PIN(D10), ++ PINCFG_PIN(C12), PINCFG_PIN(C13), PINCFG_PIN(AC26), ++ PINCFG_PIN(AA25), PINCFG_PIN(AB23), PINCFG_PIN(U22), ++ PINCFG_PIN(V21), PINCFG_PIN(N26), PINCFG_PIN(P25), ++ PINCFG_PIN(N25), PINCFG_PIN(V23), PINCFG_PIN(W22), ++ PINCFG_PIN(AB26), PINCFG_PIN(AD26), PINCFG_PIN(P26), ++ PINCFG_PIN(AE26), PINCFG_PIN(AF26), PINCFG_PIN(AF25), ++ PINCFG_PIN(AE25), PINCFG_PIN(AD25), PINCFG_PIN(AF23), ++ PINCFG_PIN(AF20), PINCFG_PIN(AF21), PINCFG_PIN(AE21), ++ PINCFG_PIN(AE23), PINCFG_PIN(AD22), PINCFG_PIN(AF17), ++ PINCFG_PIN(AA16), PINCFG_PIN(Y16), PINCFG_PIN(V17), ++ PINCFG_PIN(J13), PINCFG_PIN(AB16), PINCFG_PIN(AC16), ++ PINCFG_PIN(AF16), PINCFG_PIN(AA15), PINCFG_PIN(AB15), ++ PINCFG_PIN(AC15), PINCFG_PIN(AD15), PINCFG_PIN(Y15), ++ PINCFG_PIN(AA14), PINCFG_PIN(W16), PINCFG_PIN(V16), ++ PINCFG_PIN(AB18), PINCFG_PIN(AC18), PINCFG_PIN(K13), ++ PINCFG_PIN(AA17), PINCFG_PIN(AB17), PINCFG_PIN(AD16), ++ PINCFG_PIN(AC17), PINCFG_PIN(AD17), PINCFG_PIN(AE16), ++ PINCFG_PIN(AE17), PINCFG_PIN(AB24), PINCFG_PIN(W26), ++ PINCFG_PIN(HOLE0), PINCFG_PIN(HOLE1), PINCFG_PIN(HOLE2), ++ PINCFG_PIN(HOLE3), PINCFG_PIN(W25), 
PINCFG_PIN(Y23), ++ PINCFG_PIN(Y24), PINCFG_PIN(W21), PINCFG_PIN(AA23), ++ PINCFG_PIN(AC22), PINCFG_PIN(AB22), PINCFG_PIN(Y21), ++ PINCFG_PIN(AE20), PINCFG_PIN(AF19), PINCFG_PIN(Y22), ++ PINCFG_PIN(AA20), PINCFG_PIN(AA22), PINCFG_PIN(AB20), ++ PINCFG_PIN(AF18), PINCFG_PIN(AE19), PINCFG_PIN(AD20), ++ PINCFG_PIN(AC20), PINCFG_PIN(AA21), PINCFG_PIN(AB21), ++ PINCFG_PIN(AC19), PINCFG_PIN(AE18), PINCFG_PIN(AD19), ++ PINCFG_PIN(AD18), PINCFG_PIN(U25), PINCFG_PIN(U26), ++ PINCFG_PIN(Y26), PINCFG_PIN(AA24), PINCFG_PIN(R25), ++ PINCFG_PIN(AA26), PINCFG_PIN(R26), PINCFG_PIN(Y25), ++ PINCFG_PIN(B16), PINCFG_PIN(D14), PINCFG_PIN(B15), ++ PINCFG_PIN(B14), PINCFG_PIN(C17), PINCFG_PIN(B13), ++ PINCFG_PIN(E14), PINCFG_PIN(C15), PINCFG_PIN(D24), ++ PINCFG_PIN(B23), PINCFG_PIN(B22), PINCFG_PIN(C23), ++ PINCFG_PIN(B18), PINCFG_PIN(B21), PINCFG_PIN(M15), ++ PINCFG_PIN(B19), PINCFG_PIN(B26), PINCFG_PIN(A25), ++ PINCFG_PIN(A24), PINCFG_PIN(B24), PINCFG_PIN(E26), ++ PINCFG_PIN(A21), PINCFG_PIN(A19), PINCFG_PIN(A18), ++ PINCFG_PIN(D26), PINCFG_PIN(C26), PINCFG_PIN(A23), ++ PINCFG_PIN(A22), PINCFG_PIN(B25), PINCFG_PIN(F26), ++ PINCFG_PIN(A26), PINCFG_PIN(A14), PINCFG_PIN(E10), ++ PINCFG_PIN(E13), PINCFG_PIN(D12), PINCFG_PIN(F10), ++ PINCFG_PIN(E11), PINCFG_PIN(F11), PINCFG_PIN(F13), ++ PINCFG_PIN(N15), PINCFG_PIN(C20), PINCFG_PIN(C19), ++ PINCFG_PIN(A8), PINCFG_PIN(R14), PINCFG_PIN(A7), ++ PINCFG_PIN(P14), PINCFG_PIN(D20), PINCFG_PIN(A6), ++ PINCFG_PIN(B6), PINCFG_PIN(N14), PINCFG_PIN(B7), ++ PINCFG_PIN(B8), PINCFG_PIN(B9), PINCFG_PIN(M14), ++ PINCFG_PIN(J11), PINCFG_PIN(E7), PINCFG_PIN(D19), ++ PINCFG_PIN(B11), PINCFG_PIN(D15), PINCFG_PIN(B12), ++ PINCFG_PIN(B10), PINCFG_PIN(P13), PINCFG_PIN(C18), ++ PINCFG_PIN(C6), PINCFG_PIN(C7), PINCFG_PIN(D7), ++ PINCFG_PIN(N13), PINCFG_PIN(C8), PINCFG_PIN(C9), ++ PINCFG_PIN(C10), PINCFG_PIN(M16), PINCFG_PIN(A15), ++ PINCFG_PIN(G11), PINCFG_PIN(H7), PINCFG_PIN(H8), ++ PINCFG_PIN(H9), PINCFG_PIN(H10), PINCFG_PIN(H11), ++ PINCFG_PIN(J9), PINCFG_PIN(J10), PINCFG_PIN(E9), ++ PINCFG_PIN(F9), PINCFG_PIN(F8), PINCFG_PIN(M13), ++ PINCFG_PIN(F7), PINCFG_PIN(D8), PINCFG_PIN(E8), ++ PINCFG_PIN(L12), PINCFG_PIN(F12), PINCFG_PIN(E12), ++ PINCFG_PIN(J12), PINCFG_PIN(G7), PINCFG_PIN(G8), ++ PINCFG_PIN(G9), PINCFG_PIN(G10), PINCFG_PIN(K12), ++ PINCFG_PIN(W17), PINCFG_PIN(V18), PINCFG_PIN(W18), ++ PINCFG_PIN(Y17), PINCFG_PIN(AA18), PINCFG_PIN(AA13), ++ PINCFG_PIN(Y18), PINCFG_PIN(AA12), PINCFG_PIN(W20), ++ PINCFG_PIN(V20), PINCFG_PIN(Y11), PINCFG_PIN(V14), ++ PINCFG_PIN(V19), PINCFG_PIN(W14), PINCFG_PIN(Y20), ++ PINCFG_PIN(AB19), PINCFG_PIN(U21), PINCFG_PIN(T24), ++ PINCFG_PIN(V24), PINCFG_PIN(V22), PINCFG_PIN(T23), ++ PINCFG_PIN(AC25), PINCFG_PIN(AB25), PINCFG_PIN(AC24), ++ PINCFG_PIN(SGMII0), PINCFG_PIN(PCIERC2_PERST), ++ PINCFG_PIN(PORTC_MODE), PINCFG_PIN(PORTD_MODE), ++}; ++ ++static int aspeed_g7_soc1_dt_node_to_map(struct pinctrl_dev *pctldev, ++ struct device_node *np_config, ++ struct pinctrl_map **map, u32 *num_maps) ++{ ++ return pinconf_generic_dt_node_to_map(pctldev, np_config, map, num_maps, ++ PIN_MAP_TYPE_INVALID); ++} ++ ++static void aspeed_g7_soc1_dt_free_map(struct pinctrl_dev *pctldev, ++ struct pinctrl_map *map, u32 num_maps) ++{ ++ kfree(map); ++} ++ ++static const struct pinctrl_ops aspeed_g7_soc1_pinctrl_ops = { ++ .get_groups_count = aspeed_pinctrl_get_groups_count, ++ .get_group_name = aspeed_pinctrl_get_group_name, ++ .get_group_pins = aspeed_pinctrl_get_group_pins, ++ .pin_dbg_show = aspeed_pinctrl_pin_dbg_show, ++ .dt_node_to_map = aspeed_g7_soc1_dt_node_to_map, 
++ .dt_free_map = aspeed_g7_soc1_dt_free_map, ++}; ++ ++static const struct pinmux_ops aspeed_g7_soc1_pinmux_ops = { ++ .get_functions_count = aspeed_pinmux_get_fn_count, ++ .get_function_name = aspeed_pinmux_get_fn_name, ++ .get_function_groups = aspeed_pinmux_get_fn_groups, ++ .set_mux = aspeed_g7_pinmux_set_mux, ++ .gpio_request_enable = aspeed_g7_gpio_request_enable, ++ .strict = true, ++}; ++ ++static const struct pinconf_ops aspeed_g7_soc1_pinconf_ops = { ++ .is_generic = true, ++ .pin_config_get = aspeed_pin_config_get, ++ .pin_config_set = aspeed_pin_config_set, ++ .pin_config_group_get = aspeed_pin_config_group_get, ++ .pin_config_group_set = aspeed_pin_config_group_set, ++}; ++ ++/* pinctrl_desc */ ++static struct pinctrl_desc aspeed_g7_soc1_pinctrl_desc = { ++ .name = "aspeed-g7-soc1-pinctrl", ++ .pins = aspeed_g7_soc1_pins, ++ .npins = ARRAY_SIZE(aspeed_g7_soc1_pins), ++ .pctlops = &aspeed_g7_soc1_pinctrl_ops, ++ .pmxops = &aspeed_g7_soc1_pinmux_ops, ++ .confops = &aspeed_g7_soc1_pinconf_ops, ++ .owner = THIS_MODULE, ++}; ++ ++static struct aspeed_pin_config aspeed_g7_configs[] = { ++ /* GPIOA */ ++ { PIN_CONFIG_DRIVE_STRENGTH, { C16, C16 }, SCU4C0, GENMASK(1, 0) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { C14, C14 }, SCU4C0, GENMASK(3, 2) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { C11, C11 }, SCU4C0, GENMASK(5, 4) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { D9, D9 }, SCU4C0, GENMASK(7, 6) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { F14, F14 }, SCU4C0, GENMASK(9, 8) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { D10, D10 }, SCU4C0, GENMASK(11, 10) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { C12, C12 }, SCU4C0, GENMASK(13, 12) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { C13, C13 }, SCU4C0, GENMASK(15, 14) }, ++ { PIN_CONFIG_POWER_SOURCE, { C16, C13 }, SCU4A0, BIT_MASK(4) }, ++ /* GPIOI */ ++ { PIN_CONFIG_DRIVE_STRENGTH, { W25, W25 }, SCU4C0, GENMASK(17, 16) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { Y23, Y23 }, SCU4C0, GENMASK(19, 18) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { Y24, Y24 }, SCU4C0, GENMASK(21, 20) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { W21, W21 }, SCU4C0, GENMASK(23, 22) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { AA23, AA23 }, SCU4C0, GENMASK(25, 24) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { AC22, AC22 }, SCU4C0, GENMASK(27, 26) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { AB22, AB22 }, SCU4C0, GENMASK(29, 28) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { Y21, Y21 }, SCU4C0, GENMASK(31, 30) }, ++ { PIN_CONFIG_POWER_SOURCE, { W25, Y21 }, SCU4A0, BIT_MASK(12) }, ++ /* GPIOJ */ ++ { PIN_CONFIG_DRIVE_STRENGTH, { AE20, AE20 }, SCU4C4, GENMASK(1, 0) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { AF19, AF19 }, SCU4C4, GENMASK(3, 2) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { Y22, Y22 }, SCU4C4, GENMASK(5, 4) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { AA20, AA20 }, SCU4C4, GENMASK(7, 6) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { AA22, AA22 }, SCU4C4, GENMASK(9, 8) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { AB20, AB20 }, SCU4C4, GENMASK(11, 10) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { AF18, AF18 }, SCU4C4, GENMASK(13, 12) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { AE19, AE19 }, SCU4C4, GENMASK(15, 14) }, ++ /* GPIOK */ ++ { PIN_CONFIG_DRIVE_STRENGTH, { AD20, AD20 }, SCU4C4, GENMASK(17, 16) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { AC20, AC20 }, SCU4C4, GENMASK(19, 18) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { AA21, AA21 }, SCU4C4, GENMASK(21, 20) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { AB21, AB21 }, SCU4C4, GENMASK(23, 22) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { AC19, AC19 }, SCU4C4, GENMASK(25, 24) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { AE18, AE18 }, SCU4C4, GENMASK(27, 26) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { AD19, AD19 }, 
SCU4C4, GENMASK(29, 28) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { AD18, AD18 }, SCU4C4, GENMASK(31, 30) }, ++ /* GPIOL */ ++ { PIN_CONFIG_DRIVE_STRENGTH, { U25, U25 }, SCU4C8, GENMASK(1, 0) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { U26, U26 }, SCU4C8, GENMASK(3, 2) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { Y26, Y26 }, SCU4C8, GENMASK(5, 4) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { AA24, AA24 }, SCU4C8, GENMASK(7, 6) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { R25, R25 }, SCU4C8, GENMASK(9, 8) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { AA26, AA26 }, SCU4C8, GENMASK(11, 10) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { R26, R26 }, SCU4C8, GENMASK(13, 12) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { Y25, Y25 }, SCU4C8, GENMASK(15, 14) }, ++ { PIN_CONFIG_POWER_SOURCE, { U25, Y25 }, SCU4A0, BIT_MASK(15) }, ++ /* GPIOM */ ++ { PIN_CONFIG_DRIVE_STRENGTH, { B16, B16 }, SCU4C8, GENMASK(17, 16) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { D14, D14 }, SCU4C8, GENMASK(19, 18) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { B15, B15 }, SCU4C8, GENMASK(21, 20) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { B14, B14 }, SCU4C8, GENMASK(23, 22) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { C17, C17 }, SCU4C8, GENMASK(25, 24) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { B13, B13 }, SCU4C8, GENMASK(27, 26) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { E14, E14 }, SCU4C8, GENMASK(29, 28) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { C15, C15 }, SCU4C8, GENMASK(31, 30) }, ++ { PIN_CONFIG_POWER_SOURCE, { B16, C15 }, SCU4A0, BIT_MASK(16) }, ++ /* GPION */ ++ { PIN_CONFIG_DRIVE_STRENGTH, { D24, D24 }, SCU4CC, GENMASK(1, 0) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { B23, B23 }, SCU4CC, GENMASK(3, 2) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { B22, B22 }, SCU4CC, GENMASK(5, 4) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { C23, C23 }, SCU4CC, GENMASK(7, 6) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { B18, B18 }, SCU4CC, GENMASK(9, 8) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { B21, B21 }, SCU4CC, GENMASK(11, 10) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { M15, M15 }, SCU4CC, GENMASK(13, 12) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { B19, B19 }, SCU4CC, GENMASK(15, 14) }, ++ { PIN_CONFIG_POWER_SOURCE, { D24, B19 }, SCU4A0, BIT_MASK(17) }, ++ /* GPIOO */ ++ { PIN_CONFIG_DRIVE_STRENGTH, { B26, B26 }, SCU4CC, GENMASK(17, 16) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { A25, A25 }, SCU4CC, GENMASK(19, 18) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { A24, A24 }, SCU4CC, GENMASK(21, 20) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { B24, B24 }, SCU4CC, GENMASK(23, 22) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { E26, E26 }, SCU4CC, GENMASK(25, 24) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { A21, A21 }, SCU4CC, GENMASK(27, 26) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { A19, A19 }, SCU4CC, GENMASK(29, 28) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { A18, A18 }, SCU4CC, GENMASK(31, 30) }, ++ { PIN_CONFIG_POWER_SOURCE, { B26, A18 }, SCU4A0, BIT_MASK(18) }, ++ /* GPIOP */ ++ { PIN_CONFIG_DRIVE_STRENGTH, { D26, D26 }, SCU4D0, GENMASK(1, 0) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { C26, C26 }, SCU4D0, GENMASK(3, 2) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { A23, A23 }, SCU4D0, GENMASK(5, 4) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { A22, A22 }, SCU4D0, GENMASK(7, 6) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { B25, B25 }, SCU4D0, GENMASK(9, 8) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { F26, F26 }, SCU4D0, GENMASK(11, 10) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { A26, A26 }, SCU4D0, GENMASK(13, 12) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { A14, A14 }, SCU4D0, GENMASK(15, 14) }, ++ { PIN_CONFIG_POWER_SOURCE, { D26, A14 }, SCU4A0, BIT_MASK(19) }, ++ /* GPIOQ */ ++ { PIN_CONFIG_DRIVE_STRENGTH, { E10, E10 }, SCU4D0, GENMASK(17, 16) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { E13, E13 }, SCU4D0, 
GENMASK(19, 18) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { D12, D12 }, SCU4D0, GENMASK(21, 20) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { F10, F10 }, SCU4D0, GENMASK(23, 22) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { E11, E11 }, SCU4D0, GENMASK(25, 24) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { F11, F11 }, SCU4D0, GENMASK(27, 26) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { F13, F13 }, SCU4D0, GENMASK(29, 28) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { N15, N15 }, SCU4D0, GENMASK(31, 30) }, ++ /* GPIOR */ ++ { PIN_CONFIG_DRIVE_STRENGTH, { C20, C20 }, SCU4D4, GENMASK(1, 0) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { C19, C19 }, SCU4D4, GENMASK(3, 2) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { A8, A8 }, SCU4D4, GENMASK(5, 4) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { R14, R14 }, SCU4D4, GENMASK(7, 6) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { A7, A7 }, SCU4D4, GENMASK(9, 8) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { P14, P14 }, SCU4D4, GENMASK(11, 10) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { D20, D20 }, SCU4D4, GENMASK(13, 12) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { A6, A6 }, SCU4D4, GENMASK(15, 14) }, ++ { PIN_CONFIG_POWER_SOURCE, { C20, A6 }, SCU4A0, BIT_MASK(21) }, ++ /* GPIOS */ ++ { PIN_CONFIG_DRIVE_STRENGTH, { B6, B6 }, SCU4D4, GENMASK(17, 16) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { N14, N14 }, SCU4D4, GENMASK(19, 18) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { B7, B7 }, SCU4D4, GENMASK(21, 20) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { B8, B8 }, SCU4D4, GENMASK(23, 22) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { B9, B9 }, SCU4D4, GENMASK(25, 24) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { M14, M14 }, SCU4D4, GENMASK(27, 26) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { J11, J11 }, SCU4D4, GENMASK(29, 28) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { E7, E7 }, SCU4D4, GENMASK(31, 30) }, ++ { PIN_CONFIG_POWER_SOURCE, { B6, E7 }, SCU4A0, BIT_MASK(22) }, ++ /* GPIOT */ ++ { PIN_CONFIG_DRIVE_STRENGTH, { D19, D19 }, SCU4D8, GENMASK(1, 0) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { B11, B11 }, SCU4D8, GENMASK(3, 2) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { D15, D15 }, SCU4D8, GENMASK(5, 4) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { B12, B12 }, SCU4D8, GENMASK(7, 6) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { B10, B10 }, SCU4D8, GENMASK(9, 8) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { P13, P13 }, SCU4D8, GENMASK(11, 10) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { C18, C18 }, SCU4D8, GENMASK(13, 12) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { C6, C6 }, SCU4D8, GENMASK(15, 14) }, ++ { PIN_CONFIG_POWER_SOURCE, { D19, C6 }, SCU4A0, BIT_MASK(23) }, ++ /* GPIOU */ ++ { PIN_CONFIG_DRIVE_STRENGTH, { C7, C7 }, SCU4D8, GENMASK(17, 16) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { D7, D7 }, SCU4D8, GENMASK(19, 18) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { N13, N13 }, SCU4D8, GENMASK(21, 20) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { C8, C8 }, SCU4D8, GENMASK(23, 22) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { C9, C9 }, SCU4D8, GENMASK(25, 24) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { C10, C10 }, SCU4D8, GENMASK(27, 26) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { M16, M16 }, SCU4D8, GENMASK(29, 28) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { A15, A15 }, SCU4D8, GENMASK(31, 30) }, ++ { PIN_CONFIG_POWER_SOURCE, { C7, A15 }, SCU4A0, BIT_MASK(24) }, ++ /* GPIOW */ ++ { PIN_CONFIG_DRIVE_STRENGTH, { E9, E9 }, SCU4DC, GENMASK(1, 0) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { F9, F9 }, SCU4DC, GENMASK(3, 2) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { F8, F8 }, SCU4DC, GENMASK(5, 4) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { M13, M13 }, SCU4DC, GENMASK(7, 6) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { F7, F7 }, SCU4DC, GENMASK(9, 8) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { D8, D8 }, SCU4DC, GENMASK(11, 10) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { E8, E8 }, 
SCU4DC, GENMASK(13, 12) }, ++ { PIN_CONFIG_DRIVE_STRENGTH, { L12, L12 }, SCU4DC, GENMASK(15, 14) }, ++ { PIN_CONFIG_POWER_SOURCE, { E9, L12 }, SCU4A0, BIT_MASK(26) }, ++ ++ ASPEED_PULL_DOWN_PINCONF(C16, SCU480, 0), ++ ASPEED_PULL_DOWN_PINCONF(C14, SCU480, 1), ++ ASPEED_PULL_DOWN_PINCONF(C11, SCU480, 2), ++ ASPEED_PULL_DOWN_PINCONF(D9, SCU480, 3), ++ ASPEED_PULL_DOWN_PINCONF(F14, SCU480, 4), ++ ASPEED_PULL_DOWN_PINCONF(D10, SCU480, 5), ++ ASPEED_PULL_DOWN_PINCONF(C12, SCU480, 6), ++ ASPEED_PULL_DOWN_PINCONF(C13, SCU480, 7), ++ ASPEED_PULL_DOWN_PINCONF(AC26, SCU480, 8), ++ ASPEED_PULL_DOWN_PINCONF(AA25, SCU480, 9), ++ ASPEED_PULL_DOWN_PINCONF(AB23, SCU480, 10), ++ ASPEED_PULL_DOWN_PINCONF(U22, SCU480, 11), ++ ASPEED_PULL_DOWN_PINCONF(V21, SCU480, 12), ++ ASPEED_PULL_DOWN_PINCONF(N26, SCU480, 13), ++ ASPEED_PULL_DOWN_PINCONF(P25, SCU480, 14), ++ ASPEED_PULL_DOWN_PINCONF(N25, SCU480, 15), ++ ASPEED_PULL_DOWN_PINCONF(V23, SCU480, 16), ++ ASPEED_PULL_DOWN_PINCONF(W22, SCU480, 17), ++ ASPEED_PULL_DOWN_PINCONF(AB26, SCU480, 18), ++ ASPEED_PULL_DOWN_PINCONF(AD26, SCU480, 19), ++ ASPEED_PULL_DOWN_PINCONF(P26, SCU480, 20), ++ ASPEED_PULL_DOWN_PINCONF(AE26, SCU480, 21), ++ ASPEED_PULL_DOWN_PINCONF(AF26, SCU480, 22), ++ ASPEED_PULL_DOWN_PINCONF(AF25, SCU480, 23), ++ ASPEED_PULL_DOWN_PINCONF(AE25, SCU480, 24), ++ ASPEED_PULL_DOWN_PINCONF(AD25, SCU480, 25), ++ ASPEED_PULL_DOWN_PINCONF(AF23, SCU480, 26), ++ ASPEED_PULL_DOWN_PINCONF(AF20, SCU480, 27), ++ ASPEED_PULL_DOWN_PINCONF(AF21, SCU480, 28), ++ ASPEED_PULL_DOWN_PINCONF(AE21, SCU480, 29), ++ ASPEED_PULL_DOWN_PINCONF(AE23, SCU480, 30), ++ ASPEED_PULL_DOWN_PINCONF(AD22, SCU480, 31), ++ ASPEED_PULL_DOWN_PINCONF(AF17, SCU484, 0), ++ ASPEED_PULL_DOWN_PINCONF(AA16, SCU484, 1), ++ ASPEED_PULL_DOWN_PINCONF(Y16, SCU484, 2), ++ ASPEED_PULL_DOWN_PINCONF(V17, SCU484, 3), ++ ASPEED_PULL_DOWN_PINCONF(J13, SCU484, 4), ++ ASPEED_PULL_DOWN_PINCONF(AB16, SCU484, 5), ++ ASPEED_PULL_DOWN_PINCONF(AC16, SCU484, 6), ++ ASPEED_PULL_DOWN_PINCONF(AF16, SCU484, 7), ++ ASPEED_PULL_DOWN_PINCONF(AA15, SCU484, 8), ++ ASPEED_PULL_DOWN_PINCONF(AB15, SCU484, 9), ++ ASPEED_PULL_DOWN_PINCONF(AC15, SCU484, 10), ++ ASPEED_PULL_DOWN_PINCONF(AD15, SCU484, 11), ++ ASPEED_PULL_DOWN_PINCONF(Y15, SCU484, 12), ++ ASPEED_PULL_DOWN_PINCONF(AA14, SCU484, 13), ++ ASPEED_PULL_DOWN_PINCONF(W16, SCU484, 14), ++ ASPEED_PULL_DOWN_PINCONF(V16, SCU484, 15), ++ ASPEED_PULL_DOWN_PINCONF(AB18, SCU484, 16), ++ ASPEED_PULL_DOWN_PINCONF(AC18, SCU484, 17), ++ ASPEED_PULL_DOWN_PINCONF(K13, SCU484, 18), ++ ASPEED_PULL_DOWN_PINCONF(AA17, SCU484, 19), ++ ASPEED_PULL_DOWN_PINCONF(AB17, SCU484, 20), ++ ASPEED_PULL_DOWN_PINCONF(AD16, SCU484, 21), ++ ASPEED_PULL_DOWN_PINCONF(AC17, SCU484, 22), ++ ASPEED_PULL_DOWN_PINCONF(AD17, SCU484, 23), ++ ASPEED_PULL_DOWN_PINCONF(AE16, SCU484, 24), ++ ASPEED_PULL_DOWN_PINCONF(AE17, SCU484, 25), ++ ASPEED_PULL_DOWN_PINCONF(AB24, SCU484, 26), ++ ASPEED_PULL_DOWN_PINCONF(W26, SCU484, 27), ++ ASPEED_PULL_DOWN_PINCONF(HOLE0, SCU484, 28), ++ ASPEED_PULL_DOWN_PINCONF(HOLE1, SCU484, 29), ++ ASPEED_PULL_DOWN_PINCONF(HOLE2, SCU484, 30), ++ ASPEED_PULL_DOWN_PINCONF(HOLE3, SCU484, 31), ++ ASPEED_PULL_DOWN_PINCONF(W25, SCU488, 0), ++ ASPEED_PULL_DOWN_PINCONF(Y23, SCU488, 1), ++ ASPEED_PULL_DOWN_PINCONF(Y24, SCU488, 2), ++ ASPEED_PULL_DOWN_PINCONF(W21, SCU488, 3), ++ ASPEED_PULL_DOWN_PINCONF(AA23, SCU488, 4), ++ ASPEED_PULL_DOWN_PINCONF(AC22, SCU488, 5), ++ ASPEED_PULL_DOWN_PINCONF(AB22, SCU488, 6), ++ ASPEED_PULL_DOWN_PINCONF(Y21, SCU488, 7), ++ ASPEED_PULL_DOWN_PINCONF(AE20, SCU488, 8), ++ 
ASPEED_PULL_DOWN_PINCONF(AF19, SCU488, 9), ++ ASPEED_PULL_DOWN_PINCONF(Y22, SCU488, 10), ++ ASPEED_PULL_DOWN_PINCONF(AA20, SCU488, 11), ++ ASPEED_PULL_DOWN_PINCONF(AA22, SCU488, 12), ++ ASPEED_PULL_DOWN_PINCONF(AB20, SCU488, 13), ++ ASPEED_PULL_DOWN_PINCONF(AF18, SCU488, 14), ++ ASPEED_PULL_DOWN_PINCONF(AE19, SCU488, 15), ++ ASPEED_PULL_DOWN_PINCONF(AD20, SCU488, 16), ++ ASPEED_PULL_DOWN_PINCONF(AC20, SCU488, 17), ++ ASPEED_PULL_DOWN_PINCONF(AA21, SCU488, 18), ++ ASPEED_PULL_DOWN_PINCONF(AB21, SCU488, 19), ++ ASPEED_PULL_DOWN_PINCONF(AC19, SCU488, 20), ++ ASPEED_PULL_DOWN_PINCONF(AE18, SCU488, 21), ++ ASPEED_PULL_DOWN_PINCONF(AD19, SCU488, 22), ++ ASPEED_PULL_DOWN_PINCONF(AD18, SCU488, 23), ++ ASPEED_PULL_DOWN_PINCONF(U25, SCU488, 24), ++ ASPEED_PULL_DOWN_PINCONF(U26, SCU488, 25), ++ ASPEED_PULL_DOWN_PINCONF(Y26, SCU488, 26), ++ ASPEED_PULL_DOWN_PINCONF(AA24, SCU488, 27), ++ ASPEED_PULL_DOWN_PINCONF(R25, SCU488, 28), ++ ASPEED_PULL_DOWN_PINCONF(AA26, SCU488, 29), ++ ASPEED_PULL_DOWN_PINCONF(R26, SCU488, 30), ++ ASPEED_PULL_DOWN_PINCONF(Y25, SCU488, 31), ++ ASPEED_PULL_DOWN_PINCONF(B16, SCU48C, 0), ++ ASPEED_PULL_DOWN_PINCONF(D14, SCU48C, 1), ++ ASPEED_PULL_DOWN_PINCONF(B15, SCU48C, 2), ++ ASPEED_PULL_DOWN_PINCONF(B14, SCU48C, 3), ++ ASPEED_PULL_DOWN_PINCONF(C17, SCU48C, 4), ++ ASPEED_PULL_DOWN_PINCONF(B13, SCU48C, 5), ++ ASPEED_PULL_DOWN_PINCONF(E14, SCU48C, 6), ++ ASPEED_PULL_DOWN_PINCONF(C15, SCU48C, 7), ++ ASPEED_PULL_DOWN_PINCONF(D24, SCU48C, 8), ++ ASPEED_PULL_DOWN_PINCONF(B23, SCU48C, 9), ++ ASPEED_PULL_DOWN_PINCONF(B22, SCU48C, 10), ++ ASPEED_PULL_DOWN_PINCONF(C23, SCU48C, 11), ++ ASPEED_PULL_DOWN_PINCONF(B18, SCU48C, 12), ++ ASPEED_PULL_DOWN_PINCONF(B21, SCU48C, 13), ++ ASPEED_PULL_DOWN_PINCONF(M15, SCU48C, 14), ++ ASPEED_PULL_DOWN_PINCONF(B19, SCU48C, 15), ++ ASPEED_PULL_DOWN_PINCONF(B26, SCU48C, 16), ++ ASPEED_PULL_DOWN_PINCONF(A25, SCU48C, 17), ++ ASPEED_PULL_DOWN_PINCONF(A24, SCU48C, 18), ++ ASPEED_PULL_DOWN_PINCONF(B24, SCU48C, 19), ++ ASPEED_PULL_DOWN_PINCONF(E26, SCU48C, 20), ++ ASPEED_PULL_DOWN_PINCONF(A21, SCU48C, 21), ++ ASPEED_PULL_DOWN_PINCONF(A19, SCU48C, 22), ++ ASPEED_PULL_DOWN_PINCONF(A18, SCU48C, 23), ++ ASPEED_PULL_DOWN_PINCONF(D26, SCU48C, 24), ++ ASPEED_PULL_DOWN_PINCONF(C26, SCU48C, 25), ++ ASPEED_PULL_DOWN_PINCONF(A23, SCU48C, 26), ++ ASPEED_PULL_DOWN_PINCONF(A22, SCU48C, 27), ++ ASPEED_PULL_DOWN_PINCONF(B25, SCU48C, 28), ++ ASPEED_PULL_DOWN_PINCONF(F26, SCU48C, 29), ++ ASPEED_PULL_DOWN_PINCONF(A26, SCU48C, 30), ++ ASPEED_PULL_DOWN_PINCONF(A14, SCU48C, 31), ++ ASPEED_PULL_DOWN_PINCONF(E10, SCU490, 0), ++ ASPEED_PULL_DOWN_PINCONF(E13, SCU490, 1), ++ ASPEED_PULL_DOWN_PINCONF(D12, SCU490, 2), ++ ASPEED_PULL_DOWN_PINCONF(F10, SCU490, 3), ++ ASPEED_PULL_DOWN_PINCONF(E11, SCU490, 4), ++ ASPEED_PULL_DOWN_PINCONF(F11, SCU490, 5), ++ ASPEED_PULL_DOWN_PINCONF(F13, SCU490, 6), ++ ASPEED_PULL_DOWN_PINCONF(N15, SCU490, 7), ++ ASPEED_PULL_DOWN_PINCONF(C20, SCU490, 8), ++ ASPEED_PULL_DOWN_PINCONF(C19, SCU490, 9), ++ ASPEED_PULL_DOWN_PINCONF(A8, SCU490, 10), ++ ASPEED_PULL_DOWN_PINCONF(R14, SCU490, 11), ++ ASPEED_PULL_DOWN_PINCONF(A7, SCU490, 12), ++ ASPEED_PULL_DOWN_PINCONF(P14, SCU490, 13), ++ ASPEED_PULL_DOWN_PINCONF(D20, SCU490, 14), ++ ASPEED_PULL_DOWN_PINCONF(A6, SCU490, 15), ++ ASPEED_PULL_DOWN_PINCONF(B6, SCU490, 16), ++ ASPEED_PULL_DOWN_PINCONF(N14, SCU490, 17), ++ ASPEED_PULL_DOWN_PINCONF(B7, SCU490, 18), ++ ASPEED_PULL_DOWN_PINCONF(B8, SCU490, 19), ++ ASPEED_PULL_DOWN_PINCONF(B9, SCU490, 20), ++ ASPEED_PULL_DOWN_PINCONF(M14, SCU490, 21), ++ 
ASPEED_PULL_DOWN_PINCONF(J11, SCU490, 22), ++ ASPEED_PULL_DOWN_PINCONF(E7, SCU490, 23), ++ ASPEED_PULL_DOWN_PINCONF(D19, SCU490, 24), ++ ASPEED_PULL_DOWN_PINCONF(B11, SCU490, 25), ++ ASPEED_PULL_DOWN_PINCONF(D15, SCU490, 26), ++ ASPEED_PULL_DOWN_PINCONF(B12, SCU490, 27), ++ ASPEED_PULL_DOWN_PINCONF(B10, SCU490, 28), ++ ASPEED_PULL_DOWN_PINCONF(P13, SCU490, 29), ++ ASPEED_PULL_DOWN_PINCONF(C18, SCU490, 30), ++ ASPEED_PULL_DOWN_PINCONF(C6, SCU490, 31), ++ ASPEED_PULL_DOWN_PINCONF(C7, SCU494, 0), ++ ASPEED_PULL_DOWN_PINCONF(D7, SCU494, 1), ++ ASPEED_PULL_DOWN_PINCONF(N13, SCU494, 2), ++ ASPEED_PULL_DOWN_PINCONF(C8, SCU494, 3), ++ ASPEED_PULL_DOWN_PINCONF(C9, SCU494, 4), ++ ASPEED_PULL_DOWN_PINCONF(C10, SCU494, 5), ++ ASPEED_PULL_DOWN_PINCONF(M16, SCU494, 6), ++ ASPEED_PULL_DOWN_PINCONF(A15, SCU494, 7), ++ ASPEED_PULL_DOWN_PINCONF(G11, SCU494, 8), ++ ASPEED_PULL_DOWN_PINCONF(H7, SCU494, 9), ++ ASPEED_PULL_DOWN_PINCONF(H8, SCU494, 10), ++ ASPEED_PULL_DOWN_PINCONF(H9, SCU494, 11), ++ ASPEED_PULL_DOWN_PINCONF(H10, SCU494, 12), ++ ASPEED_PULL_DOWN_PINCONF(H11, SCU494, 13), ++ ASPEED_PULL_DOWN_PINCONF(J9, SCU494, 14), ++ ASPEED_PULL_DOWN_PINCONF(J10, SCU494, 15), ++ ASPEED_PULL_DOWN_PINCONF(E9, SCU494, 16), ++ ASPEED_PULL_DOWN_PINCONF(F9, SCU494, 17), ++ ASPEED_PULL_DOWN_PINCONF(F8, SCU494, 18), ++ ASPEED_PULL_DOWN_PINCONF(M13, SCU494, 19), ++ ASPEED_PULL_DOWN_PINCONF(F7, SCU494, 20), ++ ASPEED_PULL_DOWN_PINCONF(D8, SCU494, 21), ++ ASPEED_PULL_DOWN_PINCONF(E8, SCU494, 22), ++ ASPEED_PULL_DOWN_PINCONF(L12, SCU494, 23), ++ ASPEED_PULL_DOWN_PINCONF(F12, SCU494, 24), ++ ASPEED_PULL_DOWN_PINCONF(E12, SCU494, 25), ++ ASPEED_PULL_DOWN_PINCONF(J12, SCU494, 26), ++ ASPEED_PULL_DOWN_PINCONF(G7, SCU494, 27), ++ ASPEED_PULL_DOWN_PINCONF(G8, SCU494, 28), ++ ASPEED_PULL_DOWN_PINCONF(G9, SCU494, 29), ++ ASPEED_PULL_DOWN_PINCONF(G10, SCU494, 30), ++ ASPEED_PULL_DOWN_PINCONF(K12, SCU494, 31), ++ ASPEED_PULL_DOWN_PINCONF(W17, SCU498, 0), ++ ASPEED_PULL_DOWN_PINCONF(V18, SCU498, 1), ++ ASPEED_PULL_DOWN_PINCONF(W18, SCU498, 2), ++ ASPEED_PULL_DOWN_PINCONF(Y17, SCU498, 3), ++ ASPEED_PULL_DOWN_PINCONF(AA18, SCU498, 4), ++ ASPEED_PULL_DOWN_PINCONF(AA13, SCU498, 5), ++ ASPEED_PULL_DOWN_PINCONF(Y18, SCU498, 6), ++ ASPEED_PULL_DOWN_PINCONF(AA12, SCU498, 7), ++ ASPEED_PULL_DOWN_PINCONF(W20, SCU498, 8), ++ ASPEED_PULL_DOWN_PINCONF(V20, SCU498, 9), ++ ASPEED_PULL_DOWN_PINCONF(Y11, SCU498, 10), ++ ASPEED_PULL_DOWN_PINCONF(V14, SCU498, 11), ++ ASPEED_PULL_DOWN_PINCONF(V19, SCU498, 12), ++ ASPEED_PULL_DOWN_PINCONF(W14, SCU498, 13), ++ ASPEED_PULL_DOWN_PINCONF(Y20, SCU498, 14), ++ ASPEED_PULL_DOWN_PINCONF(AB19, SCU498, 15), ++ ASPEED_PULL_DOWN_PINCONF(U21, SCU498, 16), ++ ASPEED_PULL_DOWN_PINCONF(T24, SCU498, 17), ++ ASPEED_PULL_DOWN_PINCONF(V24, SCU498, 18), ++ ASPEED_PULL_DOWN_PINCONF(V22, SCU498, 19), ++ ASPEED_PULL_DOWN_PINCONF(T23, SCU498, 20), ++ ASPEED_PULL_DOWN_PINCONF(AC25, SCU498, 21), ++ ASPEED_PULL_DOWN_PINCONF(AB25, SCU498, 22), ++ ASPEED_PULL_DOWN_PINCONF(AC24, SCU498, 23), ++}; ++ ++static const struct aspeed_pin_config_map aspeed_g7_pin_config_map[] = { ++ { PIN_CONFIG_BIAS_PULL_DOWN, 0, 1, BIT_MASK(0)}, ++ { PIN_CONFIG_BIAS_PULL_DOWN, -1, 0, BIT_MASK(0)}, ++ { PIN_CONFIG_BIAS_PULL_UP, 0, 1, BIT_MASK(0)}, ++ { PIN_CONFIG_BIAS_PULL_UP, -1, 0, BIT_MASK(0)}, ++ { PIN_CONFIG_BIAS_DISABLE, -1, 1, BIT_MASK(0)}, ++ { PIN_CONFIG_DRIVE_STRENGTH, 0, 0, GENMASK(1, 0)}, ++ { PIN_CONFIG_DRIVE_STRENGTH, 1, 1, GENMASK(1, 0)}, ++ { PIN_CONFIG_DRIVE_STRENGTH, 2, 2, GENMASK(1, 0)}, ++ { PIN_CONFIG_DRIVE_STRENGTH, 3, 3, GENMASK(1, 
0)}, ++ { PIN_CONFIG_POWER_SOURCE, 3300, 0, BIT_MASK(0)}, ++ { PIN_CONFIG_POWER_SOURCE, 1800, 1, BIT_MASK(0)}, ++}; ++ ++static struct aspeed_pinctrl_data aspeed_g7_pinctrl_data = { ++ .pins = aspeed_g7_soc1_pins, ++ .npins = ARRAY_SIZE(aspeed_g7_soc1_pins), ++ .pinmux = { ++ .groups = aspeed_g7_soc1_pingroups, ++ .ngroups = ARRAY_SIZE(aspeed_g7_soc1_pingroups), ++ .functions = aspeed_g7_soc1_funcs, ++ .nfunctions = ARRAY_SIZE(aspeed_g7_soc1_funcs), ++ .configs_g7 = pin_cfg, ++ .nconfigs_g7 = ARRAY_SIZE(pin_cfg), ++ }, ++ .configs = aspeed_g7_configs, ++ .nconfigs = ARRAY_SIZE(aspeed_g7_configs), ++ .confmaps = aspeed_g7_pin_config_map, ++ .nconfmaps = ARRAY_SIZE(aspeed_g7_pin_config_map), ++}; ++ ++static int aspeed_g7_soc1_pinctrl_probe(struct platform_device *pdev) ++{ ++ return aspeed_pinctrl_probe(pdev, &aspeed_g7_soc1_pinctrl_desc, ++ &aspeed_g7_pinctrl_data); ++} ++ ++static const struct of_device_id aspeed_g7_soc1_pinctrl_match[] = { ++ { .compatible = "aspeed,ast2700-soc1-pinctrl" }, ++ {} ++}; ++MODULE_DEVICE_TABLE(of, aspeed_g7_soc1_pinctrl_match); ++ ++static struct platform_driver aspeed_g7_soc1_pinctrl_driver = { ++ .probe = aspeed_g7_soc1_pinctrl_probe, ++ .driver = { ++ .name = "aspeed-g7-soc1-pinctrl", ++ .of_match_table = aspeed_g7_soc1_pinctrl_match, ++ .suppress_bind_attrs = true, ++ }, ++}; ++ ++static int __init aspeed_g7_soc1_pinctrl_register(void) ++{ ++ return platform_driver_register(&aspeed_g7_soc1_pinctrl_driver); ++} ++arch_initcall(aspeed_g7_soc1_pinctrl_register); +diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c +--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c 2025-12-23 10:16:21.136032468 +0000 +@@ -285,6 +285,32 @@ + return 0; + } + ++int aspeed_g7_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function, ++ unsigned int group) ++{ ++ int i, j; ++ int pin; ++ const struct aspeed_g7_funcfg *funcfg; ++ struct aspeed_pinctrl_data *pinctrl = pinctrl_dev_get_drvdata(pctldev); ++ const struct aspeed_pin_group *pingroup = ++ &pinctrl->pinmux.groups[group]; ++ const struct aspeed_g7_pincfg *pin_cfg = pinctrl->pinmux.configs_g7; ++ ++ for (i = 0; i < pingroup->npins; i++) { ++ pin = pingroup->pins[i]; ++ funcfg = pin_cfg[pin].funcfg; ++ ++ for (j = 0; j < pin_cfg[pin].nfuncfg; j++) { ++ if (strcmp(funcfg[j].name, pingroup->name) == 0) { ++ regmap_update_bits(pinctrl->scu, funcfg[j].reg, ++ funcfg[j].mask, ++ funcfg[j].val); ++ } ++ } ++ } ++ return 0; ++} ++ + static bool aspeed_expr_is_gpio(const struct aspeed_sig_expr *expr) + { + /* +@@ -440,6 +466,27 @@ + return 0; + } + ++int aspeed_g7_gpio_request_enable(struct pinctrl_dev *pctldev, ++ struct pinctrl_gpio_range *range, ++ unsigned int offset) ++{ ++ int i; ++ struct aspeed_pinctrl_data *pinctrl = pinctrl_dev_get_drvdata(pctldev); ++ const struct aspeed_g7_pincfg *pin_cfg = pinctrl->pinmux.configs_g7; ++ const struct aspeed_g7_funcfg *funcfg = pin_cfg[offset].funcfg; ++ ++ for (i = 0; i < pin_cfg[offset].nfuncfg; i++) { ++ if (!strncmp(funcfg[i].name, "GPI", 3)) { ++ regmap_update_bits(pinctrl->scu, funcfg[i].reg, ++ funcfg[i].mask, funcfg[i].val); ++ break; ++ } ++ regmap_update_bits(pinctrl->scu, funcfg[i].reg, funcfg[i].mask, ++ 0); ++ } ++ return 0; ++} ++ + int aspeed_pinctrl_probe(struct platform_device *pdev, + struct pinctrl_desc *pdesc, + struct aspeed_pinctrl_data *pdata) +diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.h b/drivers/pinctrl/aspeed/pinctrl-aspeed.h 
+--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.h 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.h 2025-12-23 10:16:21.136032468 +0000 +@@ -70,15 +70,15 @@ + struct regmap *scu; + + const struct pinctrl_pin_desc *pins; +- const unsigned int npins; ++ unsigned int npins; + + const struct aspeed_pin_config *configs; +- const unsigned int nconfigs; ++ unsigned int nconfigs; + + struct aspeed_pinmux_data pinmux; + + const struct aspeed_pin_config_map *confmaps; +- const unsigned int nconfmaps; ++ unsigned int nconfmaps; + }; + + /* Aspeed pinctrl helpers */ +@@ -101,6 +101,11 @@ + int aspeed_gpio_request_enable(struct pinctrl_dev *pctldev, + struct pinctrl_gpio_range *range, + unsigned int offset); ++int aspeed_g7_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function, ++ unsigned int group); ++int aspeed_g7_gpio_request_enable(struct pinctrl_dev *pctldev, ++ struct pinctrl_gpio_range *range, ++ unsigned int offset); + int aspeed_pinctrl_probe(struct platform_device *pdev, + struct pinctrl_desc *pdesc, + struct aspeed_pinctrl_data *pdata); +diff --git a/drivers/pinctrl/aspeed/pinmux-aspeed.h b/drivers/pinctrl/aspeed/pinmux-aspeed.h +--- a/drivers/pinctrl/aspeed/pinmux-aspeed.h 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/pinctrl/aspeed/pinmux-aspeed.h 2025-12-23 10:16:21.136032468 +0000 +@@ -792,6 +792,33 @@ + const struct aspeed_sig_expr *expr, bool enabled); + }; + ++struct aspeed_g7_funcfg { ++ char *name; ++ u32 reg; ++ u32 mask; ++ int val; ++}; ++ ++struct aspeed_g7_pincfg { ++ struct aspeed_g7_funcfg *funcfg; ++ unsigned int nfuncfg; ++}; ++ ++#define PIN_CFG(cfg_name, cfg_reg, cfg_mask, cfg_val) \ ++ { \ ++ .name = #cfg_name, .reg = cfg_reg, .mask = cfg_mask, \ ++ .val = cfg_val \ ++ } ++#define FUNCFG_SYM(pin) funcfg_ ## pin ++#define FUNCFG_PTR(pin) (&FUNCFG_SYM(pin)) ++ ++#define FUNCFG_DESCL(pin, ...) \ ++ static const struct aspeed_g7_funcfg FUNCFG_SYM(pin)[] = { __VA_ARGS__ } ++ ++#define PINCFG_PIN(pin) \ ++ [pin] = { .funcfg = (struct aspeed_g7_funcfg *)FUNCFG_PTR(pin), \ ++ .nfuncfg = ARRAY_SIZE(FUNCFG_SYM(pin)) } ++ + struct aspeed_pinmux_data { + struct device *dev; + struct regmap *maps[ASPEED_NR_PINMUX_IPS]; +@@ -799,10 +826,14 @@ + const struct aspeed_pinmux_ops *ops; + + const struct aspeed_pin_group *groups; +- const unsigned int ngroups; ++ unsigned int ngroups; + + const struct aspeed_pin_function *functions; +- const unsigned int nfunctions; ++ unsigned int nfunctions; ++ ++ const struct aspeed_g7_pincfg *configs_g7; ++ unsigned int nconfigs_g7; ++ + }; + + int aspeed_sig_desc_eval(const struct aspeed_sig_desc *desc, bool enabled, +diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c +--- a/drivers/pwm/core.c 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/pwm/core.c 2025-12-23 10:16:22.024017585 +0000 +@@ -426,9 +426,8 @@ + * chip. A negative error code is returned if the index is not valid for the + * specified PWM chip or if the PWM device cannot be requested. 
+ */ +-static struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip, +- unsigned int index, +- const char *label) ++struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip, ++ unsigned int index, const char *label) + { + struct pwm_device *pwm; + int err; +@@ -446,6 +445,7 @@ + + return pwm; + } ++EXPORT_SYMBOL_GPL(pwm_request_from_chip); + + struct pwm_device * + of_pwm_xlate_with_flags(struct pwm_chip *chip, const struct of_phandle_args *args) +diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig +--- a/drivers/reset/Kconfig 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/reset/Kconfig 2025-12-23 10:16:09.724223772 +0000 +@@ -22,6 +22,13 @@ + This option enables support for the external reset functions for + peripheral PHYs on the Altera Arria10 System Resource Chip. + ++config RESET_ASPEED ++ tristate "ASPEED Reset Driver" ++ depends on ARCH_ASPEED || COMPILE_TEST ++ select AUXILIARY_BUS ++ help ++ This enables the reset controller driver for AST2700. ++ + config RESET_ATH79 + bool "AR71xx Reset Driver" if COMPILE_TEST + default ATH79 +@@ -51,8 +58,8 @@ + + config RESET_BRCMSTB + tristate "Broadcom STB reset controller" +- depends on ARCH_BRCMSTB || COMPILE_TEST +- default ARCH_BRCMSTB ++ depends on ARCH_BRCMSTB || ARCH_BCM2835 || COMPILE_TEST ++ default ARCH_BRCMSTB || ARCH_BCM2835 + help + This enables the reset controller driver for Broadcom STB SoCs using + a SUN_TOP_CTRL_SW_INIT style controller. +@@ -60,11 +67,11 @@ + config RESET_BRCMSTB_RESCAL + tristate "Broadcom STB RESCAL reset controller" + depends on HAS_IOMEM +- depends on ARCH_BRCMSTB || COMPILE_TEST +- default ARCH_BRCMSTB ++ depends on ARCH_BRCMSTB || ARCH_BCM2835 || COMPILE_TEST ++ default ARCH_BRCMSTB || ARCH_BCM2835 + help + This enables the RESCAL reset controller for SATA, PCIe0, or PCIe1 on +- BCM7216. ++ BCM7216 or the BCM2712. + + config RESET_EYEQ + bool "Mobileye EyeQ reset controller" +@@ -170,6 +177,7 @@ + config RESET_NPCM + bool "NPCM BMC Reset Driver" if COMPILE_TEST + default ARCH_NPCM ++ select AUXILIARY_BUS + help + This enables the reset controller driver for Nuvoton NPCM + BMC SoCs. +diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile +--- a/drivers/reset/Makefile 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/reset/Makefile 2025-12-23 10:16:13.896153811 +0000 +@@ -5,6 +5,7 @@ + obj-y += sti/ + obj-y += tegra/ + obj-$(CONFIG_RESET_A10SR) += reset-a10sr.o ++obj-$(CONFIG_RESET_ASPEED) += reset-aspeed.o + obj-$(CONFIG_RESET_ATH79) += reset-ath79.o + obj-$(CONFIG_RESET_AXS10X) += reset-axs10x.o + obj-$(CONFIG_RESET_BCM6345) += reset-bcm6345.o +diff --git a/drivers/reset/reset-aspeed.c b/drivers/reset/reset-aspeed.c +--- a/drivers/reset/reset-aspeed.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/reset/reset-aspeed.c 2025-12-23 10:16:21.120032737 +0000 +@@ -0,0 +1,310 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++/* ++ * Copyright (c) 2024 ASPEED Technology Inc. 
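++ *
++ * Reset controller for the AST2700 SCUs. The controller is not probed
++ * from the device tree directly: a parent driver (typically the SCU
++ * clock driver) calls aspeed_reset_controller_register() to create an
++ * auxiliary device, which this driver binds to through its ID table
++ * ("reset_aspeed.reset0" / "reset_aspeed.reset1"). Lines with a
++ * dedicated clear register are asserted by writing their bit to the
++ * listed offset and de-asserted by writing the same bit to offset +
++ * 0x04; the remaining lines are read-modify-write bits protected by
++ * the spinlock.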
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++ ++#define SCU0_RESET_CTRL1 0x200 ++#define SCU0_RESET_CTRL2 0x220 ++#define SCU1_RESET_CTRL1 0x200 ++#define SCU1_RESET_CTRL2 0x220 ++#define SCU1_PCIE3_CTRL 0x908 ++ ++struct aspeed_reset; ++ ++struct ast2700_reset_signal { ++ bool dedicated_clr; /* dedicated reset clr offset */ ++ u32 offset, bit; ++}; ++ ++struct aspeed_reset_info { ++ unsigned int nr_resets; ++ const struct ast2700_reset_signal *signal; ++}; ++ ++struct aspeed_reset { ++ struct reset_controller_dev rcdev; ++ struct aspeed_reset_info *info; ++ spinlock_t lock; /* Protect read-modify-write cycle */ ++ void __iomem *base; ++}; ++ ++static const struct ast2700_reset_signal ast2700_reset0_signals[] = { ++ [SCU0_RESET_SDRAM] = { true, SCU0_RESET_CTRL1, BIT(0) }, ++ [SCU0_RESET_DDRPHY] = { true, SCU0_RESET_CTRL1, BIT(1) }, ++ [SCU0_RESET_RSA] = { true, SCU0_RESET_CTRL1, BIT(2) }, ++ [SCU0_RESET_SHA3] = { true, SCU0_RESET_CTRL1, BIT(3) }, ++ [SCU0_RESET_HACE] = { true, SCU0_RESET_CTRL1, BIT(4) }, ++ [SCU0_RESET_SOC] = { true, SCU0_RESET_CTRL1, BIT(5) }, ++ [SCU0_RESET_VIDEO] = { true, SCU0_RESET_CTRL1, BIT(6) }, ++ [SCU0_RESET_2D] = { true, SCU0_RESET_CTRL1, BIT(7) }, ++ [SCU0_RESET_PCIS] = { true, SCU0_RESET_CTRL1, BIT(8) }, ++ [SCU0_RESET_RVAS0] = { true, SCU0_RESET_CTRL1, BIT(9) }, ++ [SCU0_RESET_RVAS1] = { true, SCU0_RESET_CTRL1, BIT(10) }, ++ [SCU0_RESET_SM3] = { true, SCU0_RESET_CTRL1, BIT(11) }, ++ [SCU0_RESET_SM4] = { true, SCU0_RESET_CTRL1, BIT(12) }, ++ [SCU0_RESET_CRT0] = { true, SCU0_RESET_CTRL1, BIT(13) }, ++ [SCU0_RESET_ECC] = { true, SCU0_RESET_CTRL1, BIT(14) }, ++ [SCU0_RESET_DP_PCI] = { true, SCU0_RESET_CTRL1, BIT(15) }, ++ [SCU0_RESET_UFS] = { true, SCU0_RESET_CTRL1, BIT(16) }, ++ [SCU0_RESET_EMMC] = { true, SCU0_RESET_CTRL1, BIT(17) }, ++ [SCU0_RESET_PCIE1RST] = { true, SCU0_RESET_CTRL1, BIT(18) }, ++ [SCU0_RESET_PCIE1RSTOE] = { true, SCU0_RESET_CTRL1, BIT(19) }, ++ [SCU0_RESET_PCIE0RST] = { true, SCU0_RESET_CTRL1, BIT(20) }, ++ [SCU0_RESET_PCIE0RSTOE] = { true, SCU0_RESET_CTRL1, BIT(21) }, ++ [SCU0_RESET_JTAG] = { true, SCU0_RESET_CTRL1, BIT(22) }, ++ [SCU0_RESET_MCTP0] = { true, SCU0_RESET_CTRL1, BIT(23) }, ++ [SCU0_RESET_MCTP1] = { true, SCU0_RESET_CTRL1, BIT(24) }, ++ [SCU0_RESET_XDMA0] = { true, SCU0_RESET_CTRL1, BIT(25) }, ++ [SCU0_RESET_XDMA1] = { true, SCU0_RESET_CTRL1, BIT(26) }, ++ [SCU0_RESET_H2X1] = { true, SCU0_RESET_CTRL1, BIT(27) }, ++ [SCU0_RESET_DP] = { true, SCU0_RESET_CTRL1, BIT(28) }, ++ [SCU0_RESET_DP_MCU] = { true, SCU0_RESET_CTRL1, BIT(29) }, ++ [SCU0_RESET_SSP] = { true, SCU0_RESET_CTRL1, BIT(30) }, ++ [SCU0_RESET_H2X0] = { true, SCU0_RESET_CTRL1, BIT(31) }, ++ [SCU0_RESET_PORTA_VHUB] = { true, SCU0_RESET_CTRL2, BIT(0) }, ++ [SCU0_RESET_PORTA_PHY3] = { true, SCU0_RESET_CTRL2, BIT(1) }, ++ [SCU0_RESET_PORTA_XHCI] = { true, SCU0_RESET_CTRL2, BIT(2) }, ++ [SCU0_RESET_PORTB_VHUB] = { true, SCU0_RESET_CTRL2, BIT(3) }, ++ [SCU0_RESET_PORTB_PHY3] = { true, SCU0_RESET_CTRL2, BIT(4) }, ++ [SCU0_RESET_PORTB_XHCI] = { true, SCU0_RESET_CTRL2, BIT(5) }, ++ [SCU0_RESET_PORTA_VHUB_EHCI] = { true, SCU0_RESET_CTRL2, BIT(6) }, ++ [SCU0_RESET_PORTB_VHUB_EHCI] = { true, SCU0_RESET_CTRL2, BIT(7) }, ++ [SCU0_RESET_UHCI] = { true, SCU0_RESET_CTRL2, BIT(8) }, ++ [SCU0_RESET_TSP] = { true, SCU0_RESET_CTRL2, BIT(9) }, ++ [SCU0_RESET_E2M0] = { true, SCU0_RESET_CTRL2, BIT(10) }, ++ [SCU0_RESET_E2M1] = { true, SCU0_RESET_CTRL2, BIT(11) }, ++ [SCU0_RESET_VLINK] = { true, SCU0_RESET_CTRL2, BIT(12) }, 
++}; ++ ++static const struct ast2700_reset_signal ast2700_reset1_signals[] = { ++ [SCU1_RESET_LPC0] = { true, SCU1_RESET_CTRL1, BIT(0) }, ++ [SCU1_RESET_LPC1] = { true, SCU1_RESET_CTRL1, BIT(1) }, ++ [SCU1_RESET_MII] = { true, SCU1_RESET_CTRL1, BIT(2) }, ++ [SCU1_RESET_PECI] = { true, SCU1_RESET_CTRL1, BIT(3) }, ++ [SCU1_RESET_PWM] = { true, SCU1_RESET_CTRL1, BIT(4) }, ++ [SCU1_RESET_MAC0] = { true, SCU1_RESET_CTRL1, BIT(5) }, ++ [SCU1_RESET_MAC1] = { true, SCU1_RESET_CTRL1, BIT(6) }, ++ [SCU1_RESET_MAC2] = { true, SCU1_RESET_CTRL1, BIT(7) }, ++ [SCU1_RESET_ADC] = { true, SCU1_RESET_CTRL1, BIT(8) }, ++ [SCU1_RESET_SD] = { true, SCU1_RESET_CTRL1, BIT(9) }, ++ [SCU1_RESET_ESPI0] = { true, SCU1_RESET_CTRL1, BIT(10) }, ++ [SCU1_RESET_ESPI1] = { true, SCU1_RESET_CTRL1, BIT(11) }, ++ [SCU1_RESET_JTAG1] = { true, SCU1_RESET_CTRL1, BIT(12) }, ++ [SCU1_RESET_SPI0] = { true, SCU1_RESET_CTRL1, BIT(13) }, ++ [SCU1_RESET_SPI1] = { true, SCU1_RESET_CTRL1, BIT(14) }, ++ [SCU1_RESET_SPI2] = { true, SCU1_RESET_CTRL1, BIT(15) }, ++ [SCU1_RESET_I3C0] = { true, SCU1_RESET_CTRL1, BIT(16) }, ++ [SCU1_RESET_I3C1] = { true, SCU1_RESET_CTRL1, BIT(17) }, ++ [SCU1_RESET_I3C2] = { true, SCU1_RESET_CTRL1, BIT(18) }, ++ [SCU1_RESET_I3C3] = { true, SCU1_RESET_CTRL1, BIT(19) }, ++ [SCU1_RESET_I3C4] = { true, SCU1_RESET_CTRL1, BIT(20) }, ++ [SCU1_RESET_I3C5] = { true, SCU1_RESET_CTRL1, BIT(21) }, ++ [SCU1_RESET_I3C6] = { true, SCU1_RESET_CTRL1, BIT(22) }, ++ [SCU1_RESET_I3C7] = { true, SCU1_RESET_CTRL1, BIT(23) }, ++ [SCU1_RESET_I3C8] = { true, SCU1_RESET_CTRL1, BIT(24) }, ++ [SCU1_RESET_I3C9] = { true, SCU1_RESET_CTRL1, BIT(25) }, ++ [SCU1_RESET_I3C10] = { true, SCU1_RESET_CTRL1, BIT(26) }, ++ [SCU1_RESET_I3C11] = { true, SCU1_RESET_CTRL1, BIT(27) }, ++ [SCU1_RESET_I3C12] = { true, SCU1_RESET_CTRL1, BIT(28) }, ++ [SCU1_RESET_I3C13] = { true, SCU1_RESET_CTRL1, BIT(29) }, ++ [SCU1_RESET_I3C14] = { true, SCU1_RESET_CTRL1, BIT(30) }, ++ [SCU1_RESET_I3C15] = { true, SCU1_RESET_CTRL1, BIT(31) }, ++ [SCU1_RESET_MCU0] = { true, SCU1_RESET_CTRL2, BIT(0) }, ++ [SCU1_RESET_MCU1] = { true, SCU1_RESET_CTRL2, BIT(1) }, ++ [SCU1_RESET_H2A_SPI1] = { true, SCU1_RESET_CTRL2, BIT(2) }, ++ [SCU1_RESET_H2A_SPI2] = { true, SCU1_RESET_CTRL2, BIT(3) }, ++ [SCU1_RESET_UART0] = { true, SCU1_RESET_CTRL2, BIT(4) }, ++ [SCU1_RESET_UART1] = { true, SCU1_RESET_CTRL2, BIT(5) }, ++ [SCU1_RESET_UART2] = { true, SCU1_RESET_CTRL2, BIT(6) }, ++ [SCU1_RESET_UART3] = { true, SCU1_RESET_CTRL2, BIT(7) }, ++ [SCU1_RESET_I2C_FILTER] = { true, SCU1_RESET_CTRL2, BIT(8) }, ++ [SCU1_RESET_CALIPTRA] = { true, SCU1_RESET_CTRL2, BIT(9) }, ++ [SCU1_RESET_XDMA] = { true, SCU1_RESET_CTRL2, BIT(10) }, ++ [SCU1_RESET_FSI] = { true, SCU1_RESET_CTRL2, BIT(12) }, ++ [SCU1_RESET_CAN] = { true, SCU1_RESET_CTRL2, BIT(13) }, ++ [SCU1_RESET_MCTP] = { true, SCU1_RESET_CTRL2, BIT(14) }, ++ [SCU1_RESET_I2C] = { true, SCU1_RESET_CTRL2, BIT(15) }, ++ [SCU1_RESET_UART6] = { true, SCU1_RESET_CTRL2, BIT(16) }, ++ [SCU1_RESET_UART7] = { true, SCU1_RESET_CTRL2, BIT(17) }, ++ [SCU1_RESET_UART8] = { true, SCU1_RESET_CTRL2, BIT(18) }, ++ [SCU1_RESET_UART9] = { true, SCU1_RESET_CTRL2, BIT(19) }, ++ [SCU1_RESET_LTPI0] = { true, SCU1_RESET_CTRL2, BIT(20) }, ++ [SCU1_RESET_VGAL] = { true, SCU1_RESET_CTRL2, BIT(21) }, ++ [SCU1_RESET_LTPI1] = { true, SCU1_RESET_CTRL2, BIT(22) }, ++ [SCU1_RESET_ACE] = { true, SCU1_RESET_CTRL2, BIT(23) }, ++ [SCU1_RESET_E2M] = { true, SCU1_RESET_CTRL2, BIT(24) }, ++ [SCU1_RESET_UHCI] = { true, SCU1_RESET_CTRL2, BIT(25) }, ++ [SCU1_RESET_PORTC_USB2UART] = { true, 
SCU1_RESET_CTRL2, BIT(26) }, ++ [SCU1_RESET_PORTC_VHUB_EHCI] = { true, SCU1_RESET_CTRL2, BIT(27) }, ++ [SCU1_RESET_PORTD_USB2UART] = { true, SCU1_RESET_CTRL2, BIT(28) }, ++ [SCU1_RESET_PORTD_VHUB_EHCI] = { true, SCU1_RESET_CTRL2, BIT(29) }, ++ [SCU1_RESET_H2X] = { true, SCU1_RESET_CTRL2, BIT(30) }, ++ [SCU1_RESET_I3CDMA] = { true, SCU1_RESET_CTRL2, BIT(31) }, ++ [SCU1_RESET_PCIE2RST] = { false, SCU1_PCIE3_CTRL, BIT(0) }, ++}; ++ ++static inline struct aspeed_reset *to_aspeed_reset(struct reset_controller_dev *rcdev) ++{ ++ return container_of(rcdev, struct aspeed_reset, rcdev); ++} ++ ++static int aspeed_reset_assert(struct reset_controller_dev *rcdev, unsigned long id) ++{ ++ struct aspeed_reset *rc = to_aspeed_reset(rcdev); ++ void __iomem *reg_offset = rc->base + rc->info->signal[id].offset; ++ ++ if (rc->info->signal[id].dedicated_clr) { ++ writel(rc->info->signal[id].bit, reg_offset); ++ } else { ++ guard(spinlock_irqsave)(&rc->lock); ++ writel(readl(reg_offset) & ~rc->info->signal[id].bit, reg_offset); ++ } ++ ++ return 0; ++} ++ ++static int aspeed_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id) ++{ ++ struct aspeed_reset *rc = to_aspeed_reset(rcdev); ++ void __iomem *reg_offset = rc->base + rc->info->signal[id].offset; ++ ++ if (rc->info->signal[id].dedicated_clr) { ++ writel(rc->info->signal[id].bit, reg_offset + 0x04); ++ } else { ++ guard(spinlock_irqsave)(&rc->lock); ++ writel(readl(reg_offset) | rc->info->signal[id].bit, reg_offset); ++ } ++ ++ return 0; ++} ++ ++static int aspeed_reset_status(struct reset_controller_dev *rcdev, unsigned long id) ++{ ++ struct aspeed_reset *rc = to_aspeed_reset(rcdev); ++ void __iomem *reg_offset = rc->base + rc->info->signal[id].offset; ++ ++ return (readl(reg_offset) & rc->info->signal[id].bit) ? 
1 : 0; ++} ++ ++static const struct reset_control_ops aspeed_reset_ops = { ++ .assert = aspeed_reset_assert, ++ .deassert = aspeed_reset_deassert, ++ .status = aspeed_reset_status, ++}; ++ ++static int aspeed_reset_probe(struct auxiliary_device *adev, ++ const struct auxiliary_device_id *id) ++{ ++ struct aspeed_reset *reset; ++ struct device *dev = &adev->dev; ++ ++ reset = devm_kzalloc(dev, sizeof(*reset), GFP_KERNEL); ++ if (!reset) ++ return -ENOMEM; ++ ++ spin_lock_init(&reset->lock); ++ ++ reset->info = (struct aspeed_reset_info *)id->driver_data; ++ reset->rcdev.owner = THIS_MODULE; ++ reset->rcdev.nr_resets = reset->info->nr_resets; ++ reset->rcdev.ops = &aspeed_reset_ops; ++ reset->rcdev.of_node = dev->parent->of_node; ++ reset->rcdev.dev = dev; ++ reset->rcdev.of_reset_n_cells = 1; ++ reset->base = (void __iomem *)adev->dev.platform_data; ++ ++ dev_set_drvdata(dev, reset); ++ ++ return devm_reset_controller_register(dev, &reset->rcdev); ++} ++ ++static void aspeed_reset_unregister_adev(void *_adev) ++{ ++ struct auxiliary_device *adev = _adev; ++ ++ auxiliary_device_delete(adev); ++ auxiliary_device_uninit(adev); ++} ++ ++static void aspeed_reset_adev_release(struct device *dev) ++{ ++ struct auxiliary_device *adev = to_auxiliary_dev(dev); ++ ++ kfree(adev); ++} ++ ++int aspeed_reset_controller_register(struct device *clk_dev, void __iomem *base, ++ const char *adev_name) ++{ ++ struct auxiliary_device *adev; ++ int ret; ++ ++ adev = kzalloc(sizeof(*adev), GFP_KERNEL); ++ if (!adev) ++ return -ENOMEM; ++ ++ adev->name = adev_name; ++ adev->dev.parent = clk_dev; ++ adev->dev.release = aspeed_reset_adev_release; ++ adev->id = 666u; ++ ++ ret = auxiliary_device_init(adev); ++ if (ret) { ++ kfree(adev); ++ return ret; ++ } ++ ++ ret = auxiliary_device_add(adev); ++ if (ret) { ++ auxiliary_device_uninit(adev); ++ return ret; ++ } ++ ++ adev->dev.platform_data = (__force void *)base; ++ ++ return devm_add_action_or_reset(clk_dev, aspeed_reset_unregister_adev, adev); ++} ++EXPORT_SYMBOL_GPL(aspeed_reset_controller_register); ++ ++static const struct aspeed_reset_info ast2700_reset0_info = { ++ .nr_resets = ARRAY_SIZE(ast2700_reset0_signals), ++ .signal = ast2700_reset0_signals, ++}; ++ ++static const struct aspeed_reset_info ast2700_reset1_info = { ++ .nr_resets = ARRAY_SIZE(ast2700_reset1_signals), ++ .signal = ast2700_reset1_signals, ++}; ++ ++static const struct auxiliary_device_id aspeed_reset_ids[] = { ++ { .name = "reset_aspeed.reset0", .driver_data = (kernel_ulong_t)&ast2700_reset0_info }, ++ { .name = "reset_aspeed.reset1", .driver_data = (kernel_ulong_t)&ast2700_reset1_info }, ++ { } ++}; ++MODULE_DEVICE_TABLE(auxiliary, aspeed_reset_ids); ++ ++static struct auxiliary_driver aspeed_reset_driver = { ++ .probe = aspeed_reset_probe, ++ .id_table = aspeed_reset_ids, ++}; ++ ++static int __init rest_aspeed_init(void) ++{ ++ return auxiliary_driver_register(&aspeed_reset_driver); ++} ++subsys_initcall(rest_aspeed_init); ++ ++MODULE_AUTHOR("Ryan Chen "); ++MODULE_DESCRIPTION("ASPEED SoC Reset Controller Driver"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/rtc/rtc-aspeed.c b/drivers/rtc/rtc-aspeed.c +--- a/drivers/rtc/rtc-aspeed.c 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/rtc/rtc-aspeed.c 2025-12-23 10:16:21.094033172 +0000 +@@ -10,14 +10,64 @@ + struct aspeed_rtc { + struct rtc_device *rtc_dev; + void __iomem *base; ++ spinlock_t irq_lock; /* interrupt enable register lock */ ++ struct mutex write_mutex; /* serialize registers write */ + }; + + #define RTC_TIME 0x00 + 
#define RTC_YEAR 0x04 ++#define RTC_ALARM 0x08 + #define RTC_CTRL 0x10 ++#define RTC_ALARM_STATUS 0x14 + +-#define RTC_UNLOCK BIT(1) +-#define RTC_ENABLE BIT(0) ++#define RTC_ENABLE BIT(0) ++#define RTC_UNLOCK BIT(1) ++#define RTC_ALARM_MODE BIT(2) ++#define RTC_ALARM_SEC_ENABLE BIT(3) ++#define RTC_ALARM_MIN_ENABLE BIT(4) ++#define RTC_ALARM_HOUR_ENABLE BIT(5) ++#define RTC_ALARM_MDAY_ENABLE BIT(6) ++ ++#define RTC_ALARM_SEC_CB_STATUS BIT(0) ++#define RTC_ALARM_MIN_STATUS BIT(1) ++#define RTC_ALARM_HOUR_STATUS BIT(2) ++#define RTC_ALARM_MDAY_STATUS BIT(3) ++ ++/* ++ * enable a rtc interrupt ++ */ ++static void aspeed_rtc_int_enable(struct aspeed_rtc *rtc, u32 intr) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&rtc->irq_lock, flags); ++ writel(readl(rtc->base + RTC_CTRL) | intr, rtc->base + RTC_CTRL); ++ spin_unlock_irqrestore(&rtc->irq_lock, flags); ++} ++ ++/* ++ * disable a rtc interrupt ++ */ ++static void aspeed_rtc_int_disable(struct aspeed_rtc *rtc, u32 intr) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&rtc->irq_lock, flags); ++ writel(readl(rtc->base + RTC_CTRL) & ~intr, rtc->base + RTC_CTRL); ++ spin_unlock_irqrestore(&rtc->irq_lock, flags); ++} ++ ++/* ++ * clean a rtc interrupt status ++ */ ++static void aspeed_rtc_clean_alarm(struct aspeed_rtc *rtc) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&rtc->irq_lock, flags); ++ writel(readl(rtc->base + RTC_ALARM_STATUS), rtc->base + RTC_ALARM_STATUS); ++ spin_unlock_irqrestore(&rtc->irq_lock, flags); ++} + + static int aspeed_rtc_read_time(struct device *dev, struct rtc_time *tm) + { +@@ -45,7 +95,7 @@ + tm->tm_mon = ((reg2 >> 0) & 0x0f) - 1; + tm->tm_year = year + (cent * 100) - 1900; + +- dev_dbg(dev, "%s %ptR", __func__, tm); ++ dev_dbg(dev, "%s %ptR\n", __func__, tm); + + return 0; + } +@@ -56,6 +106,8 @@ + u32 reg1, reg2, ctrl; + int year, cent; + ++ dev_dbg(dev, "%s %ptR\n", __func__, tm); ++ + cent = (tm->tm_year + 1900) / 100; + year = tm->tm_year % 100; + +@@ -77,40 +129,211 @@ + return 0; + } + ++static int aspeed_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) ++{ ++ struct aspeed_rtc *rtc = dev_get_drvdata(dev); ++ unsigned int alarm_enable; ++ ++ dev_dbg(dev, "%s, enabled:%x\n", __func__, enabled); ++ ++ alarm_enable = RTC_ALARM_MODE | RTC_ALARM_SEC_ENABLE | RTC_ALARM_MIN_ENABLE | ++ RTC_ALARM_HOUR_ENABLE | RTC_ALARM_MDAY_ENABLE; ++ if (enabled) ++ aspeed_rtc_int_enable(rtc, alarm_enable); ++ else ++ aspeed_rtc_int_disable(rtc, alarm_enable); ++ ++ return 0; ++} ++ ++static int aspeed_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm) ++{ ++ struct aspeed_rtc *rtc = dev_get_drvdata(dev); ++ u32 reg1, reg2; ++ unsigned int alarm_enable; ++ unsigned int alarm_status; ++ ++ if (!(readl(rtc->base + RTC_CTRL) & RTC_ENABLE)) { ++ dev_dbg(dev, "%s failing as rtc disabled\n", __func__); ++ return -EINVAL; ++ } ++ ++ do { ++ reg2 = readl(rtc->base + RTC_YEAR); ++ reg1 = readl(rtc->base + RTC_TIME); ++ } while (reg1 != readl(rtc->base + RTC_TIME)); ++ ++ /* read alarm value */ ++ alarm->time.tm_mday = (reg1 >> 24) & 0x1f; ++ alarm->time.tm_hour = (reg1 >> 16) & 0x1f; ++ alarm->time.tm_min = (reg1 >> 8) & 0x3f; ++ alarm->time.tm_sec = (reg1 >> 0) & 0x3f; ++ ++ dev_dbg(dev, "%s %ptR\n", __func__, &alarm->time); ++ ++ alarm_enable = RTC_ALARM_SEC_ENABLE | RTC_ALARM_MIN_ENABLE | ++ RTC_ALARM_HOUR_ENABLE | RTC_ALARM_MDAY_ENABLE; ++ alarm_status = RTC_ALARM_SEC_CB_STATUS | RTC_ALARM_MIN_STATUS | ++ RTC_ALARM_HOUR_STATUS | RTC_ALARM_MDAY_STATUS; ++ ++ /* don't allow the ALARM read to mess up 
ALARM_STATUS */
++	mutex_lock(&rtc->write_mutex);
++
++	/* alarm is enabled if the interrupt is enabled */
++	if (readl(rtc->base + RTC_CTRL) & alarm_enable)
++		alarm->enabled = true;
++	else
++		alarm->enabled = false;
++
++	/* alarm interrupt asserted or not */
++	if (readl(rtc->base + RTC_ALARM_STATUS) & alarm_status)
++		alarm->pending = true;
++	else
++		alarm->pending = false;
++
++	mutex_unlock(&rtc->write_mutex);
++
++	return 0;
++}
++
++static int aspeed_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
++{
++	struct aspeed_rtc *rtc = dev_get_drvdata(dev);
++	unsigned int alarm_enable;
++	u32 reg;
++
++	if (!(readl(rtc->base + RTC_CTRL) & RTC_ENABLE)) {
++		dev_dbg(dev, "%s failing as rtc disabled\n", __func__);
++		return -EINVAL;
++	}
++
++	dev_dbg(dev, "%s %ptR\n", __func__, &alarm->time);
++
++	alarm_enable = RTC_ALARM_MODE | RTC_ALARM_SEC_ENABLE | RTC_ALARM_MIN_ENABLE |
++		RTC_ALARM_HOUR_ENABLE | RTC_ALARM_MDAY_ENABLE;
++
++	/* don't allow the ALARM read to mess up ALARM_STATUS */
++	mutex_lock(&rtc->write_mutex);
++
++	/* write the new alarm time */
++	reg = (alarm->time.tm_mday << 24) | (alarm->time.tm_hour << 16) |
++		(alarm->time.tm_min << 8) | alarm->time.tm_sec;
++	writel(reg, rtc->base + RTC_ALARM);
++
++	/* alarm is enabled if the interrupt is enabled */
++	if (alarm->enabled)
++		aspeed_rtc_int_enable(rtc, alarm_enable);
++	else
++		aspeed_rtc_int_disable(rtc, alarm_enable);
++
++	mutex_unlock(&rtc->write_mutex);
++
++	return 0;
++}
++
+ static const struct rtc_class_ops aspeed_rtc_ops = {
+-	.read_time = aspeed_rtc_read_time,
+-	.set_time = aspeed_rtc_set_time,
++	.read_time = aspeed_rtc_read_time,
++	.set_time = aspeed_rtc_set_time,
++	.alarm_irq_enable = aspeed_rtc_alarm_irq_enable,
++	.read_alarm = aspeed_rtc_read_alarm,
++	.set_alarm = aspeed_rtc_set_alarm,
+ };
+
++static irqreturn_t aspeed_rtc_irq(int irq, void *dev_id)
++{
++	struct aspeed_rtc *rtc = dev_id;
++	unsigned int alarm_enable;
++
++	alarm_enable = RTC_ALARM_MODE | RTC_ALARM_SEC_ENABLE | RTC_ALARM_MIN_ENABLE |
++		RTC_ALARM_HOUR_ENABLE | RTC_ALARM_MDAY_ENABLE;
++	aspeed_rtc_int_disable(rtc, alarm_enable);
++	aspeed_rtc_clean_alarm(rtc);
++
++	return IRQ_HANDLED;
++}
++
+ static int aspeed_rtc_probe(struct platform_device *pdev)
+ {
+ 	struct aspeed_rtc *rtc;
++	int irq;
++	int rc;
++	u32 ctrl;
+
+ 	rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
+ 	if (!rtc)
+ 		return -ENOMEM;
+
+ 	rtc->base = devm_platform_ioremap_resource(pdev, 0);
+-	if (IS_ERR(rtc->base))
++	if (IS_ERR(rtc->base)) {
++		dev_err(&pdev->dev, "cannot ioremap resource for rtc\n");
+ 		return PTR_ERR(rtc->base);
++	}
+
+ 	rtc->rtc_dev = devm_rtc_allocate_device(&pdev->dev);
+ 	if (IS_ERR(rtc->rtc_dev))
+ 		return PTR_ERR(rtc->rtc_dev);
+
++	spin_lock_init(&rtc->irq_lock);
++	mutex_init(&rtc->write_mutex);
++
++	irq = platform_get_irq(pdev, 0);
++	if (irq < 0)
++		return irq;
++
++	rc = devm_request_irq(&pdev->dev, irq, aspeed_rtc_irq,
++			      0, pdev->name, rtc);
++	if (rc) {
++		dev_err(&pdev->dev, "interrupt number %d is not available.\n", irq);
++		goto err;
++	}
++
+ 	platform_set_drvdata(pdev, rtc);
+
++	device_init_wakeup(&pdev->dev, true);
++
+ 	rtc->rtc_dev->ops = &aspeed_rtc_ops;
+ 	rtc->rtc_dev->range_min = RTC_TIMESTAMP_BEGIN_1900;
+ 	rtc->rtc_dev->range_max = 38814989399LL; /* 3199-12-31 23:59:59 */
+
+-	return devm_rtc_register_device(rtc->rtc_dev);
++	/*
++	 * In devm_rtc_register_device,
++	 * rtc_hctosys read time from RTC to check hardware status.
++	 * In rtc_read_time, run aspeed_rtc_read_time and check the rtc_time.
++ * As a result, need to enable and initialize RTC time. ++ * ++ * Enable and unlock RTC to initialize RTC time to 1970-01-01T01:01:01 ++ * and re-lock and ensure enable is set now that a time is programmed. ++ */ ++ ctrl = readl(rtc->base + RTC_CTRL); ++ writel(ctrl | RTC_UNLOCK, rtc->base + RTC_CTRL); ++ ++ /* ++ * Initial value set to year:70,mon:0,mday:1,hour:1,min:1,sec:1 ++ * rtc_valid_tm check whether in suitable range or not. ++ */ ++ writel(0x01010101, rtc->base + RTC_TIME); ++ writel(0x00134601, rtc->base + RTC_YEAR); ++ ++ /* Re-lock and ensure enable is set now that a time is programmed */ ++ writel(ctrl | RTC_ENABLE, rtc->base + RTC_CTRL); ++ ++ rc = devm_rtc_register_device(rtc->rtc_dev); ++ if (rc) { ++ dev_err(&pdev->dev, "can't register rtc device\n"); ++ goto err; ++ } ++ ++ return 0; ++ ++err: ++ return rc; + } + + static const struct of_device_id aspeed_rtc_match[] = { + { .compatible = "aspeed,ast2400-rtc", }, + { .compatible = "aspeed,ast2500-rtc", }, + { .compatible = "aspeed,ast2600-rtc", }, ++ { .compatible = "aspeed,ast2700-rtc", }, + {} + }; + MODULE_DEVICE_TABLE(of, aspeed_rtc_match); +diff --git a/drivers/soc/aspeed/Kconfig b/drivers/soc/aspeed/Kconfig +--- a/drivers/soc/aspeed/Kconfig 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/soc/aspeed/Kconfig 2025-12-23 10:16:21.124032669 +0000 +@@ -4,6 +4,12 @@ + + menu "ASPEED SoC drivers" + ++config ASPEED_BMC_DEV ++ tristate "ASPEED BMC Device" ++ ++config ASPEED_HOST_BMC_DEV ++ tristate "ASPEED Host BMC Device" ++ + config ASPEED_LPC_CTRL + tristate "ASPEED LPC firmware cycle control" + select REGMAP +@@ -24,6 +30,13 @@ + allows the BMC to listen on and save the data written by + the host to an arbitrary LPC I/O port. + ++config ASPEED_SSP ++ tristate "ASPEED SSP loader" ++ default n ++ help ++ Driver for loading secondary-service-processor binary ++ ++ + config ASPEED_UART_ROUTING + tristate "ASPEED uart routing control" + select REGMAP +@@ -34,6 +47,16 @@ + users to perform runtime configuration of the RX muxes among + the UART controllers and I/O pins. + ++config ASPEED_LPC_MAILBOX ++ tristate "ASPEED LPC mailbox support" ++ select REGMAP ++ select MFD_SYSCON ++ default ARCH_ASPEED ++ help ++ Provides a driver to control the LPC mailbox which possesses ++ up to 32 data registers for the communication between the Host ++ and the BMC over LPC. ++ + config ASPEED_P2A_CTRL + tristate "ASPEED P2A (VGA MMIO to BMC) bridge control" + select REGMAP +@@ -52,6 +75,134 @@ + help + Say yes to support decoding of ASPEED BMC information. + ++config ASPEED_SBC ++ bool "ASPEED Secure Boot Controller driver" ++ depends on ARM ++ default MACH_ASPEED_G6 ++ help ++ Say yes to provide information about the secure boot controller in ++ debugfs. This is only for AST2600 (ARM32). ++ ++config ASPEED_XDMA ++ tristate "ASPEED XDMA Engine Driver" ++ select REGMAP ++ select MFD_SYSCON ++ depends on HAS_DMA ++ help ++ Enables support for the XDMA Engine found on the ASPEED BMC ++ SoCs. The XDMA engine can perform PCIe DMA operations between the BMC ++ and a host processor. The driver provides ioctl() interface to reset ++ and initialize engine. ++ ++config ASPEED_LPC_PCC ++ tristate "Aspeed Post Code Capture support" ++ depends on ARCH_ASPEED && REGMAP && MFD_SYSCON ++ help ++ Provides a driver to control the LPC PCC interface, ++ allowing the BMC to snoop data bytes written by the ++ the host to an arbitrary LPC I/O port. 
++
++config ASPEED_UDMA
++	tristate "Aspeed UDMA Engine Driver"
++	depends on ARCH_ASPEED && REGMAP && MFD_SYSCON && HAS_DMA
++	help
++	  Enable support for the Aspeed UDMA Engine found on the Aspeed AST2XXX
++	  SoCs. The UDMA engine can perform UART DMA operations between the memory
++	  buffer and the UART/VUART devices.
++
++config ASPEED_MBOX
++	bool "Enable support for the ASPEED MBOX driver"
++	depends on ARCH_ASPEED
++	help
++	  Enable support for the ASPEED MBOX driver. This driver
++	  provides a mailbox client to the ASPEED BMC SoC IPC.
++
++config ASPEED_MCTP
++	tristate "Aspeed MCTP Controller support"
++	depends on REGMAP && MFD_SYSCON
++	help
++	  Enable support for the Aspeed MCTP Controller.
++	  The MCTP controller allows the BMC to communicate with devices on
++	  the host PCIe network.
++
++config ASPEED_DISP_INTF
++	bool "ASPEED Display Interface driver"
++	select REGMAP
++	select MFD_SYSCON
++	default ARCH_ASPEED
++	help
++	  Say yes to support controlling the display interface of the ASPEED BMC.
++
++config ASPEED_UDMA
++	tristate "Aspeed UDMA Engine Driver"
++	depends on ARCH_ASPEED && REGMAP && MFD_SYSCON && HAS_DMA
++	help
++	  Enable support for the Aspeed UDMA Engine found on the Aspeed AST2XXX
++	  SoCs. The UDMA engine can perform UART DMA operations between the memory
++	  buffer and the UART/VUART devices.
++
++config ASPEED_PCIE_MMBI
++	tristate "ASPEED PCIE MMBI"
++
++config ASPEED_ESPI
++	tristate "ASPEED eSPI slave driver"
++	select AST2500_ESPI if MACH_ASPEED_G5
++	select AST2600_ESPI if MACH_ASPEED_G6
++	default n
++
++config AST2500_ESPI
++	tristate
++	depends on ASPEED_ESPI
++	help
++	  Enable driver support for the Aspeed AST2500 eSPI engine.
++
++config AST2600_ESPI
++	tristate "ASPEED AST2600 eSPI slave driver"
++	help
++	  Enable driver support for the Aspeed AST2600 eSPI engine. The eSPI engine
++	  acts as a slave device in the BMC to communicate with the Host over
++	  the eSPI interface. The four eSPI channels, namely peripheral,
++	  virtual wire, out-of-band, and flash, are supported.
++
++config AST2700_ESPI
++	tristate "ASPEED AST2700 eSPI slave driver"
++	help
++	  Enable driver support for the Aspeed AST2700 eSPI engine. The eSPI engine
++	  acts as a slave device in the BMC to communicate with the Host over
++	  the eSPI interface. The four eSPI channels, namely peripheral,
++	  virtual wire, out-of-band, and flash, are supported.
++
++config AST2700_RTC_OVER_ESPI
++	tristate "ASPEED AST2700 RTC over eSPI driver"
++	depends on HAS_IOMEM
++	help
++	  Enable driver support for the Aspeed AST2700 RTC over eSPI function.
++	  Periodically copies an RTC time (struct rtc_time) from RAM to a memory-mapped eSPI region.
++
++config ASPEED_OTP
++	tristate
++	help
++	  Enable driver support for the Aspeed OTP driver. Each bit in One
++	  Time Programmable (OTP) memory can be programmed only once.
++	  The OTP driver performs basic read/program operations on
++	  OTP memory.
++
++config AST2600_OTP
++	tristate "AST2600 OTP Driver"
++	select ASPEED_OTP
++	depends on ARCH_ASPEED
++	help
++	  Enable driver support for the Aspeed AST2600 OTP driver.
++
++config AST2700_OTP
++	tristate "AST2700 OTP Driver"
++	select ASPEED_OTP
++	depends on ARCH_ASPEED
++	help
++	  Enable driver support for the Aspeed AST2700 OTP driver.
++ ++source "drivers/soc/aspeed/rvas/Kconfig" ++ + endmenu + + endif +diff --git a/drivers/soc/aspeed/Makefile b/drivers/soc/aspeed/Makefile +--- a/drivers/soc/aspeed/Makefile 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/soc/aspeed/Makefile 2025-12-23 10:16:21.183031681 +0000 +@@ -1,6 +1,26 @@ + # SPDX-License-Identifier: GPL-2.0-only ++obj-$(CONFIG_ASPEED_BMC_DEV) += aspeed-bmc-dev.o ++obj-$(CONFIG_ASPEED_HOST_BMC_DEV) += aspeed-host-bmc-dev.o + obj-$(CONFIG_ASPEED_LPC_CTRL) += aspeed-lpc-ctrl.o + obj-$(CONFIG_ASPEED_LPC_SNOOP) += aspeed-lpc-snoop.o ++obj-$(CONFIG_ASPEED_LPC_MAILBOX) += aspeed-lpc-mbox.o ++obj-$(CONFIG_ASPEED_LPC_PCC) += aspeed-lpc-pcc.o ++obj-$(CONFIG_ASPEED_UDMA) += aspeed-udma.o + obj-$(CONFIG_ASPEED_UART_ROUTING) += aspeed-uart-routing.o ++obj-$(CONFIG_ASPEED_SSP) += aspeed-ssp.o + obj-$(CONFIG_ASPEED_P2A_CTRL) += aspeed-p2a-ctrl.o + obj-$(CONFIG_ASPEED_SOCINFO) += aspeed-socinfo.o ++obj-$(CONFIG_ASPEED_XDMA) += aspeed-xdma.o ++obj-$(CONFIG_AST2500_ESPI) += ast2500-espi.o ++obj-$(CONFIG_AST2600_ESPI) += ast2600-espi.o ++obj-$(CONFIG_AST2700_ESPI) += ast2700-espi.o ++obj-$(CONFIG_ASPEED_RVAS) += rvas/ ++obj-$(CONFIG_ARCH_ASPEED) += aspeed-usb-phy.o ++obj-$(CONFIG_ARCH_ASPEED) += aspeed-usb-hp.o ++obj-$(CONFIG_ASPEED_MCTP) += aspeed-mctp.o ++obj-$(CONFIG_ASPEED_DISP_INTF) += aspeed-disp-intf.o ++obj-$(CONFIG_ASPEED_PCIE_MMBI) += aspeed-pcie-mmbi.o ++obj-$(CONFIG_ASPEED_MBOX) += aspeed-mbox.o ++obj-$(CONFIG_AST2700_RTC_OVER_ESPI) += ast2700-rtc-over-espi.o ++obj-$(CONFIG_AST2600_OTP) += ast2600-otp.o ++obj-$(CONFIG_AST2700_OTP) += ast2700-otp.o +diff --git a/drivers/soc/aspeed/aspeed-bmc-dev.c b/drivers/soc/aspeed/aspeed-bmc-dev.c +--- a/drivers/soc/aspeed/aspeed-bmc-dev.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/soc/aspeed/aspeed-bmc-dev.c 2025-12-23 10:16:21.124032669 +0000 +@@ -0,0 +1,699 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++// Copyright (C) ASPEED Technology Inc. 
++ ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++ ++#define SCU_TRIGGER_MSI ++ ++/* AST2600 SCU */ ++#define ASPEED_SCU04 0x04 ++#define AST2600A3_SCU04 0x05030303 ++#define ASPEED_SCUC20 0xC20 ++#define ASPEED_SCUC24 0xC24 ++#define MSI_ROUTING_MASK GENMASK(11, 10) ++#define PCIDEV1_INTX_MSI_HOST2BMC_EN BIT(18) ++#define MSI_ROUTING_PCIe2LPC_PCIDEV0 (0x1 << 10) ++#define MSI_ROUTING_PCIe2LPC_PCIDEV1 (0x2 << 10) ++/* AST2700 SCU */ ++#define SCU0_REVISION_ID 0x0 ++#define REVISION_ID GENMASK(23, 16) ++#define SCU0_PCIE_CONF_CTRL 0x970 ++/* Host2BMC */ ++#define ASPEED_BMC_MEM_BAR 0xF10 ++#define PCIE2PCI_MEM_BAR_ENABLE BIT(1) ++#define HOST2BMC_MEM_BAR_ENABLE BIT(0) ++#define ASPEED_BMC_MEM_BAR_REMAP 0xF18 ++ ++#define ASPEED_BMC_SHADOW_CTRL 0xF50 ++#define READ_ONLY_MASK BIT(31) ++#define MASK_BAR1 BIT(2) ++#define MASK_BAR0 BIT(1) ++#define SHADOW_CFG BIT(0) ++ ++#define ASPEED_BMC_HOST2BMC_Q1 0xA000 ++#define ASPEED_BMC_HOST2BMC_Q2 0xA010 ++#define ASPEED_BMC_BMC2HOST_Q1 0xA020 ++#define ASPEED_BMC_BMC2HOST_Q2 0xA030 ++#define ASPEED_BMC_BMC2HOST_STS 0xA040 ++#define BMC2HOST_INT_STS_DOORBELL BIT(31) ++#define BMC2HOST_ENABLE_INTB BIT(30) ++#define BMC2HOST_Q1_FULL BIT(27) ++#define BMC2HOST_Q1_EMPTY BIT(26) ++#define BMC2HOST_Q2_FULL BIT(25) ++#define BMC2HOST_Q2_EMPTY BIT(24) ++#define BMC2HOST_Q1_FULL_UNMASK BIT(23) ++#define BMC2HOST_Q1_EMPTY_UNMASK BIT(22) ++#define BMC2HOST_Q2_FULL_UNMASK BIT(21) ++#define BMC2HOST_Q2_EMPTY_UNMASK BIT(20) ++ ++#define ASPEED_BMC_HOST2BMC_STS 0xA044 ++#define HOST2BMC_INT_STS_DOORBELL BIT(31) ++#define HOST2BMC_ENABLE_INTB BIT(30) ++#define HOST2BMC_Q1_FULL BIT(27) ++#define HOST2BMC_Q1_EMPTY BIT(26) ++#define HOST2BMC_Q2_FULL BIT(25) ++#define HOST2BMC_Q2_EMPTY BIT(24) ++#define HOST2BMC_Q1_FULL_UNMASK BIT(23) ++#define HOST2BMC_Q1_EMPTY_UNMASK BIT(22) ++#define HOST2BMC_Q2_FULL_UNMASK BIT(21) ++#define HOST2BMC_Q2_EMPTY_UNMASK BIT(20) ++ ++#define ASPEED_SCU_PCIE_CONF_CTRL 0xC20 ++#define SCU_PCIE_CONF_BMC_DEV_EN BIT(8) ++#define SCU_PCIE_CONF_BMC_DEV_EN_MMIO BIT(9) ++#define SCU_PCIE_CONF_BMC_DEV_EN_MSI BIT(11) ++#define SCU_PCIE_CONF_BMC_DEV_EN_IRQ BIT(13) ++#define SCU_PCIE_CONF_BMC_DEV_EN_DMA BIT(14) ++#define SCU_PCIE_CONF_BMC_DEV_EN_E2L BIT(15) ++#define SCU_PCIE_CONF_BMC_DEV_EN_LPC_DECODE BIT(21) ++ ++#define ASPEED_SCU_BMC_DEV_CLASS 0xC68 ++ ++#define ASPEED_QUEUE_NUM 2 ++enum queue_index { ++ QUEUE1 = 0, ++ QUEUE2, ++}; ++ ++struct aspeed_platform { ++ int (*init)(struct platform_device *pdev); ++ ssize_t (*queue_rx)(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, ++ char *buf, loff_t off, size_t count); ++ ssize_t (*queue_tx)(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, ++ char *buf, loff_t off, size_t count); ++}; ++ ++struct aspeed_queue_message { ++ /* Queue waiters for idle engine */ ++ wait_queue_head_t tx_wait; ++ wait_queue_head_t rx_wait; ++ struct kernfs_node *kn; ++ struct bin_attribute bin; ++ int index; ++ struct aspeed_bmc_device *bmc_device; ++}; ++ ++struct aspeed_bmc_device { ++ unsigned char *host2bmc_base_virt; ++ struct device *dev; ++ struct miscdevice miscdev; ++ int id; ++ void __iomem *reg_base; ++ dma_addr_t bmc_mem_phy; ++ phys_addr_t bmc_mem_size; ++ ++ int pcie2lpc; ++ int irq; ++ ++ struct aspeed_queue_message queue[ASPEED_QUEUE_NUM]; ++ ++ const struct aspeed_platform *platform; ++ ++ /* AST2700 */ ++ struct regmap 
*device; ++ struct regmap *e2m; ++ ++ struct regmap *scu; ++ int pcie_irq; ++}; ++ ++static struct aspeed_bmc_device *file_aspeed_bmc_device(struct file *file) ++{ ++ return container_of(file->private_data, struct aspeed_bmc_device, ++ miscdev); ++} ++ ++static int aspeed_bmc_device_mmap(struct file *file, struct vm_area_struct *vma) ++{ ++ struct aspeed_bmc_device *bmc_device = file_aspeed_bmc_device(file); ++ unsigned long vsize = vma->vm_end - vma->vm_start; ++ pgprot_t prot = vma->vm_page_prot; ++ ++ if (((vma->vm_pgoff << PAGE_SHIFT) + vsize) > bmc_device->bmc_mem_size) ++ return -EINVAL; ++ ++ prot = pgprot_noncached(prot); ++ ++ if (remap_pfn_range(vma, vma->vm_start, ++ (bmc_device->bmc_mem_phy >> PAGE_SHIFT) + vma->vm_pgoff, vsize, prot)) ++ return -EAGAIN; ++ ++ return 0; ++} ++ ++static const struct file_operations aspeed_bmc_device_fops = { ++ .owner = THIS_MODULE, ++ .mmap = aspeed_bmc_device_mmap, ++}; ++ ++static ssize_t aspeed_ast2600_queue_rx(struct file *filp, struct kobject *kobj, ++ struct bin_attribute *attr, char *buf, loff_t off, ++ size_t count) ++{ ++ struct aspeed_queue_message *queue = attr->private; ++ struct aspeed_bmc_device *bmc_device = queue->bmc_device; ++ int index = queue->index; ++ u32 *data = (u32 *)buf; ++ u32 scu_id; ++ int ret; ++ ++ ret = wait_event_interruptible(queue->rx_wait, ++ !(readl(bmc_device->reg_base + ASPEED_BMC_HOST2BMC_STS) & ++ ((index == QUEUE1) ? HOST2BMC_Q1_EMPTY : HOST2BMC_Q2_EMPTY))); ++ if (ret) ++ return -EINTR; ++ ++ data[0] = readl(bmc_device->reg_base + ++ ((index == QUEUE1) ? ASPEED_BMC_HOST2BMC_Q1 : ASPEED_BMC_HOST2BMC_Q2)); ++ ++ regmap_read(bmc_device->scu, ASPEED_SCU04, &scu_id); ++ if (scu_id == AST2600A3_SCU04) { ++ writel(BMC2HOST_INT_STS_DOORBELL | BMC2HOST_ENABLE_INTB, ++ bmc_device->reg_base + ASPEED_BMC_BMC2HOST_STS); ++ } else { ++ //A0 : BIT(12) A1 : BIT(15) ++ regmap_update_bits(bmc_device->scu, 0x560, BIT(15), BIT(15)); ++ regmap_update_bits(bmc_device->scu, 0x560, BIT(15), 0); ++ } ++ ++ return sizeof(u32); ++} ++ ++static ssize_t aspeed_ast2600_queue_tx(struct file *filp, struct kobject *kobj, ++ struct bin_attribute *attr, char *buf, loff_t off, ++ size_t count) ++{ ++ struct aspeed_queue_message *queue = attr->private; ++ struct aspeed_bmc_device *bmc_device = queue->bmc_device; ++ int index = queue->index; ++ u32 tx_buff; ++ u32 scu_id; ++ int ret; ++ ++ if (count != sizeof(u32)) ++ return -EINVAL; ++ ++ ret = wait_event_interruptible(queue->tx_wait, ++ !(readl(bmc_device->reg_base + ASPEED_BMC_BMC2HOST_STS) & ++ ((index == QUEUE1) ? BMC2HOST_Q1_FULL : BMC2HOST_Q2_FULL))); ++ if (ret) ++ return -EINTR; ++ ++ memcpy(&tx_buff, buf, 4); ++ writel(tx_buff, bmc_device->reg_base + ((index == QUEUE1) ? 
ASPEED_BMC_BMC2HOST_Q1 : ++ ASPEED_BMC_BMC2HOST_Q2)); ++ ++ /* trigger to host ++ * Only After AST2600A3 support DoorBell MSI ++ */ ++ regmap_read(bmc_device->scu, ASPEED_SCU04, &scu_id); ++ if (scu_id == AST2600A3_SCU04) { ++ writel(BMC2HOST_INT_STS_DOORBELL | BMC2HOST_ENABLE_INTB, ++ bmc_device->reg_base + ASPEED_BMC_BMC2HOST_STS); ++ } else { ++ //A0 : BIT(12) A1 : BIT(15) ++ regmap_update_bits(bmc_device->scu, 0x560, BIT(15), BIT(15)); ++ regmap_update_bits(bmc_device->scu, 0x560, BIT(15), 0); ++ } ++ ++ return sizeof(u32); ++} ++ ++static ssize_t aspeed_ast2700_queue_rx(struct file *filp, struct kobject *kobj, ++ struct bin_attribute *attr, char *buf, loff_t off, ++ size_t count) ++{ ++ struct aspeed_queue_message *queue = attr->private; ++ struct aspeed_bmc_device *bmc_device = queue->bmc_device; ++ int index = queue->index; ++ u32 *data = (u32 *)buf; ++ int ret; ++ ++ ret = wait_event_interruptible(queue->rx_wait, ++ !(readl(bmc_device->reg_base + ASPEED_BMC_HOST2BMC_STS) & ++ ((index == QUEUE1) ? HOST2BMC_Q1_EMPTY : HOST2BMC_Q2_EMPTY))); ++ if (ret) ++ return -EINTR; ++ ++ data[0] = readl(bmc_device->reg_base + ++ ((index == QUEUE1) ? ASPEED_BMC_HOST2BMC_Q1 : ASPEED_BMC_HOST2BMC_Q2)); ++ ++ writel(BMC2HOST_INT_STS_DOORBELL | BMC2HOST_ENABLE_INTB, ++ bmc_device->reg_base + ASPEED_BMC_BMC2HOST_STS); ++ ++ return sizeof(u32); ++} ++ ++static ssize_t aspeed_ast2700_queue_tx(struct file *filp, struct kobject *kobj, ++ struct bin_attribute *attr, char *buf, loff_t off, ++ size_t count) ++{ ++ struct aspeed_queue_message *queue = attr->private; ++ struct aspeed_bmc_device *bmc_device = queue->bmc_device; ++ int index = queue->index; ++ u32 tx_buff; ++ int ret; ++ ++ if (count != sizeof(u32)) ++ return -EINVAL; ++ ++ ret = wait_event_interruptible(queue->tx_wait, ++ !(readl(bmc_device->reg_base + ASPEED_BMC_BMC2HOST_STS) & ++ ((index == QUEUE1) ? BMC2HOST_Q1_FULL : BMC2HOST_Q2_FULL))); ++ if (ret) ++ return -EINTR; ++ ++ memcpy(&tx_buff, buf, 4); ++ writel(tx_buff, bmc_device->reg_base + ((index == QUEUE1) ? 
ASPEED_BMC_BMC2HOST_Q1 : ++ ASPEED_BMC_BMC2HOST_Q2)); ++ ++ writel(BMC2HOST_INT_STS_DOORBELL | BMC2HOST_ENABLE_INTB, ++ bmc_device->reg_base + ASPEED_BMC_BMC2HOST_STS); ++ ++ return sizeof(u32); ++} ++ ++/* AST2600 */ ++static irqreturn_t aspeed_bmc_dev_pcie_isr(int irq, void *dev_id) ++{ ++ struct aspeed_bmc_device *bmc_device = dev_id; ++ ++ while (!(readl(bmc_device->reg_base + ASPEED_BMC_HOST2BMC_STS) & HOST2BMC_Q1_EMPTY)) ++ readl(bmc_device->reg_base + ASPEED_BMC_HOST2BMC_Q1); ++ ++ while (!(readl(bmc_device->reg_base + ASPEED_BMC_HOST2BMC_STS) & HOST2BMC_Q2_EMPTY)) ++ readl(bmc_device->reg_base + ASPEED_BMC_HOST2BMC_Q2); ++ ++ return IRQ_HANDLED; ++} ++ ++static irqreturn_t aspeed_bmc_dev_isr(int irq, void *dev_id) ++{ ++ struct aspeed_bmc_device *bmc_device = dev_id; ++ u32 host2bmc_q_sts = readl(bmc_device->reg_base + ASPEED_BMC_HOST2BMC_STS); ++ ++ if (host2bmc_q_sts & HOST2BMC_INT_STS_DOORBELL) ++ writel(HOST2BMC_INT_STS_DOORBELL, bmc_device->reg_base + ASPEED_BMC_HOST2BMC_STS); ++ ++ if (host2bmc_q_sts & HOST2BMC_ENABLE_INTB) ++ writel(HOST2BMC_ENABLE_INTB, bmc_device->reg_base + ASPEED_BMC_HOST2BMC_STS); ++ ++ if (host2bmc_q_sts & HOST2BMC_Q1_FULL) ++ dev_info(bmc_device->dev, "Q1 Full\n"); ++ ++ if (host2bmc_q_sts & HOST2BMC_Q2_FULL) ++ dev_info(bmc_device->dev, "Q2 Full\n"); ++ ++ if (!(readl(bmc_device->reg_base + ASPEED_BMC_BMC2HOST_STS) & BMC2HOST_Q1_FULL)) ++ wake_up_interruptible(&bmc_device->queue[QUEUE1].tx_wait); ++ ++ if (!(readl(bmc_device->reg_base + ASPEED_BMC_HOST2BMC_STS) & HOST2BMC_Q1_EMPTY)) ++ wake_up_interruptible(&bmc_device->queue[QUEUE1].rx_wait); ++ ++ if (!(readl(bmc_device->reg_base + ASPEED_BMC_BMC2HOST_STS) & BMC2HOST_Q2_FULL)) ++ wake_up_interruptible(&bmc_device->queue[QUEUE2].tx_wait); ++ ++ if (!(readl(bmc_device->reg_base + ASPEED_BMC_HOST2BMC_STS) & HOST2BMC_Q2_EMPTY)) ++ wake_up_interruptible(&bmc_device->queue[QUEUE2].rx_wait); ++ ++ return IRQ_HANDLED; ++} ++ ++static int aspeed_ast2600_init(struct platform_device *pdev) ++{ ++ struct aspeed_bmc_device *bmc_device = platform_get_drvdata(pdev); ++ struct device *dev = &pdev->dev; ++ u32 pcie_config_ctl = SCU_PCIE_CONF_BMC_DEV_EN_IRQ | ++ SCU_PCIE_CONF_BMC_DEV_EN_MMIO | SCU_PCIE_CONF_BMC_DEV_EN; ++ u32 scu_id; ++ ++ bmc_device->scu = syscon_regmap_lookup_by_phandle(dev->of_node, "aspeed,scu"); ++ if (IS_ERR(bmc_device->scu)) { ++ dev_err(&pdev->dev, "failed to find SCU regmap\n"); ++ return PTR_ERR(bmc_device->scu); ++ } ++ ++ if (bmc_device->pcie2lpc) ++ pcie_config_ctl |= SCU_PCIE_CONF_BMC_DEV_EN_E2L | ++ SCU_PCIE_CONF_BMC_DEV_EN_LPC_DECODE; ++ ++ regmap_update_bits(bmc_device->scu, ASPEED_SCU_PCIE_CONF_CTRL, ++ pcie_config_ctl, pcie_config_ctl); ++ ++ /* update class code to others as it is a MFD device */ ++ regmap_write(bmc_device->scu, ASPEED_SCU_BMC_DEV_CLASS, 0xff000000); ++ ++#ifdef SCU_TRIGGER_MSI ++ //SCUC24[17]: Enable PCI device 1 INTx/MSI from SCU560[15]. Will be added in next version ++ regmap_update_bits(bmc_device->scu, ASPEED_SCUC20, BIT(11) | BIT(14), BIT(11) | BIT(14)); ++ ++ regmap_read(bmc_device->scu, ASPEED_SCU04, &scu_id); ++ if (scu_id == AST2600A3_SCU04) ++ regmap_update_bits(bmc_device->scu, ASPEED_SCUC24, ++ PCIDEV1_INTX_MSI_HOST2BMC_EN | MSI_ROUTING_MASK, ++ PCIDEV1_INTX_MSI_HOST2BMC_EN | MSI_ROUTING_PCIe2LPC_PCIDEV1); ++ else ++ regmap_update_bits(bmc_device->scu, ASPEED_SCUC24, ++ BIT(17) | BIT(14) | BIT(11), BIT(17) | BIT(14) | BIT(11)); ++#else ++ //SCUC24[18]: Enable PCI device 1 INTx/MSI from Host-to-BMC controller. 
++ regmap_update_bits(bmc_device->scu, 0xc24, BIT(18) | BIT(14), BIT(18) | BIT(14)); ++#endif ++ ++ writel((~(bmc_device->bmc_mem_size - 1) & 0xFFFFFFFF) | HOST2BMC_MEM_BAR_ENABLE, ++ bmc_device->reg_base + ASPEED_BMC_MEM_BAR); ++ writel(bmc_device->bmc_mem_phy, bmc_device->reg_base + ASPEED_BMC_MEM_BAR_REMAP); ++ ++ //Setting BMC to Host Q register ++ writel(BMC2HOST_Q2_FULL_UNMASK | BMC2HOST_Q1_FULL_UNMASK | BMC2HOST_ENABLE_INTB, ++ bmc_device->reg_base + ASPEED_BMC_BMC2HOST_STS); ++ writel(HOST2BMC_Q2_FULL_UNMASK | HOST2BMC_Q1_FULL_UNMASK | HOST2BMC_ENABLE_INTB, ++ bmc_device->reg_base + ASPEED_BMC_HOST2BMC_STS); ++ ++ return 0; ++} ++ ++static int aspeed_ast2700_init(struct platform_device *pdev) ++{ ++ struct aspeed_bmc_device *bmc_device = platform_get_drvdata(pdev); ++ struct device *dev = &pdev->dev; ++ u32 pcie_config_ctl; ++ u32 scu_id; ++ int i; ++ ++ bmc_device->device = syscon_regmap_lookup_by_phandle(dev->of_node, "aspeed,device"); ++ if (IS_ERR(bmc_device->device)) { ++ dev_err(&pdev->dev, "failed to find device regmap\n"); ++ return PTR_ERR(bmc_device->device); ++ } ++ ++ bmc_device->e2m = syscon_regmap_lookup_by_phandle(dev->of_node, "aspeed,e2m"); ++ if (IS_ERR(bmc_device->e2m)) { ++ dev_err(&pdev->dev, "failed to find e2m regmap\n"); ++ return PTR_ERR(bmc_device->e2m); ++ } ++ ++ bmc_device->scu = syscon_regmap_lookup_by_phandle(dev->of_node, "aspeed,scu"); ++ if (IS_ERR(bmc_device->scu)) { ++ dev_err(&pdev->dev, "failed to find SCU regmap\n"); ++ return PTR_ERR(bmc_device->scu); ++ } ++ ++ if (bmc_device->pcie2lpc) { ++ pcie_config_ctl = SCU_PCIE_CONF_BMC_DEV_EN_E2L | ++ SCU_PCIE_CONF_BMC_DEV_EN_LPC_DECODE; ++ regmap_update_bits(bmc_device->scu, SCU0_PCIE_CONF_CTRL, ++ pcie_config_ctl, pcie_config_ctl); ++ } ++ ++ /* update class code to others as it is a MFD device */ ++ regmap_write(bmc_device->device, 0x18, 0xff000027); ++ ++ /* MSI */ ++ regmap_update_bits(bmc_device->device, 0x74, GENMASK(7, 4), BIT(7) | (5 << 4)); ++ /* EnPCIaMSI:BIT(25), EnPCIaIntA:BIT(17), EnPCIaMst:BIT(9), EnPCIaDev:BIT(1) */ ++ regmap_read(bmc_device->scu, SCU0_REVISION_ID, &scu_id); ++ if (scu_id & REVISION_ID) ++ regmap_update_bits(bmc_device->device, 0x70, ++ BIT(25) | BIT(17) | BIT(9) | BIT(1), ++ BIT(25) | BIT(17) | BIT(9) | BIT(1)); ++ else ++ /* Disable MSI[bit25] in ast2700A0 int only */ ++ regmap_update_bits(bmc_device->device, 0x70, ++ BIT(17) | BIT(9) | BIT(1), ++ BIT(25) | BIT(17) | BIT(9) | BIT(1)); ++ ++ /* bar size check for 4k align */ ++ for (i = 1; i < 16; i++) { ++ if ((bmc_device->bmc_mem_size / 4096) == (1 << (i - 1))) ++ break; ++ } ++ if (i == 16) { ++ i = 0; ++ dev_warn(bmc_device->dev, ++ "Bar size not align for 4K : %dK\n", (u32)bmc_device->bmc_mem_size / 1024); ++ } ++ ++ /* ++ * BAR assign in scu ++ * ((bar_mem / 4k) << 8) | per_size ++ */ ++ regmap_write(bmc_device->device, 0x1c, ((bmc_device->bmc_mem_phy) >> 4) | i); ++ ++ if (bmc_device->id == 0) ++ /* Node 0 Bar 0 */ ++ regmap_write(bmc_device->e2m, 0x108, ((bmc_device->bmc_mem_phy) >> 4) | i); ++ else ++ /* Node 1 Bar 0 */ ++ regmap_write(bmc_device->e2m, 0x128, ((bmc_device->bmc_mem_phy) >> 4) | i); ++ ++ /* Setting BMC to Host Q register */ ++ writel(BMC2HOST_Q2_FULL_UNMASK | BMC2HOST_Q1_FULL_UNMASK | BMC2HOST_ENABLE_INTB, ++ bmc_device->reg_base + ASPEED_BMC_BMC2HOST_STS); ++ writel(HOST2BMC_Q2_FULL_UNMASK | HOST2BMC_Q1_FULL_UNMASK | HOST2BMC_ENABLE_INTB, ++ bmc_device->reg_base + ASPEED_BMC_HOST2BMC_STS); ++ ++ return 0; ++} ++ ++static int aspeed_bmc_device_setup_queue(struct platform_device *pdev) ++{ ++ 
struct aspeed_bmc_device *bmc_device = platform_get_drvdata(pdev); ++ struct device *dev = &pdev->dev; ++ int ret, i; ++ ++ for (i = 0; i < ASPEED_QUEUE_NUM; i++) { ++ struct aspeed_queue_message *queue = &bmc_device->queue[i]; ++ ++ init_waitqueue_head(&queue->tx_wait); ++ init_waitqueue_head(&queue->rx_wait); ++ ++ sysfs_bin_attr_init(&queue->bin); ++ ++ /* Queue name index starts from 1 */ ++ queue->bin.attr.name = ++ devm_kasprintf(dev, GFP_KERNEL, "bmc-dev-queue%d", (i + 1)); ++ queue->bin.attr.mode = 0600; ++ queue->bin.read = bmc_device->platform->queue_rx; ++ queue->bin.write = bmc_device->platform->queue_tx; ++ queue->bin.size = 4; ++ queue->bin.private = queue; ++ ++ ret = sysfs_create_bin_file(&pdev->dev.kobj, &queue->bin); ++ if (ret) { ++ dev_err(dev, "error for bin%d file\n", i); ++ return ret; ++ } ++ ++ queue->kn = kernfs_find_and_get(dev->kobj.sd, queue->bin.attr.name); ++ if (!queue->kn) { ++ sysfs_remove_bin_file(&dev->kobj, &queue->bin); ++ return ret; ++ } ++ ++ queue->index = i; ++ queue->bmc_device = bmc_device; ++ } ++ ++ return 0; ++} ++ ++static int aspeed_bmc_device_setup_memory_mapping(struct platform_device *pdev) ++{ ++ struct aspeed_bmc_device *bmc_device = platform_get_drvdata(pdev); ++ struct device *dev = &pdev->dev; ++ int ret; ++ ++ bmc_device->miscdev.minor = MISC_DYNAMIC_MINOR; ++ bmc_device->miscdev.name = devm_kasprintf(dev, GFP_KERNEL, "bmc-device%d", bmc_device->id); ++ bmc_device->miscdev.fops = &aspeed_bmc_device_fops; ++ bmc_device->miscdev.parent = dev; ++ ret = misc_register(&bmc_device->miscdev); ++ if (ret) { ++ dev_err(dev, "Unable to register device\n"); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static struct aspeed_platform ast2600_plaform = { ++ .init = aspeed_ast2600_init, ++ .queue_rx = aspeed_ast2600_queue_rx, ++ .queue_tx = aspeed_ast2600_queue_tx ++}; ++ ++static struct aspeed_platform ast2700_plaform = { ++ .init = aspeed_ast2700_init, ++ .queue_rx = aspeed_ast2700_queue_rx, ++ .queue_tx = aspeed_ast2700_queue_tx ++}; ++ ++static const struct of_device_id aspeed_bmc_device_of_matches[] = { ++ { .compatible = "aspeed,ast2600-bmc-device", .data = &ast2600_plaform }, ++ { .compatible = "aspeed,ast2700-bmc-device", .data = &ast2700_plaform }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(of, aspeed_bmc_device_of_matches); ++ ++static int aspeed_bmc_device_probe(struct platform_device *pdev) ++{ ++ struct aspeed_bmc_device *bmc_device; ++ struct device *dev = &pdev->dev; ++ struct resource res; ++ const void *md = of_device_get_match_data(dev); ++ struct device_node *np; ++ int ret = 0, i; ++ ++ if (!md) ++ return -ENODEV; ++ ++ bmc_device = devm_kzalloc(&pdev->dev, sizeof(struct aspeed_bmc_device), GFP_KERNEL); ++ if (!bmc_device) ++ return -ENOMEM; ++ dev_set_drvdata(dev, bmc_device); ++ ++ bmc_device->platform = md; ++ ++ bmc_device->id = of_alias_get_id(dev->of_node, "bmcdev"); ++ if (bmc_device->id < 0) ++ bmc_device->id = 0; ++ ++ bmc_device->dev = dev; ++ bmc_device->reg_base = devm_platform_ioremap_resource(pdev, 0); ++ if (IS_ERR(bmc_device->reg_base)) ++ goto out_region; ++ ++ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); ++ if (ret) { ++ dev_err(dev, "cannot set 64-bits DMA mask\n"); ++ goto out_region; ++ } ++ ++ np = of_parse_phandle(dev->of_node, "memory-region", 0); ++ if (!np || of_address_to_resource(np, 0, &res)) { ++ dev_err(dev, "Failed to find memory-region.\n"); ++ ret = -ENOMEM; ++ goto out_region; ++ } ++ ++ of_node_put(np); ++ ++ bmc_device->bmc_mem_phy = res.start; ++ bmc_device->bmc_mem_size = 
resource_size(&res); ++ ++ bmc_device->irq = platform_get_irq(pdev, 0); ++ if (bmc_device->irq < 0) { ++ dev_err(&pdev->dev, "platform get of irq[=%d] failed!\n", bmc_device->irq); ++ goto out_unmap; ++ } ++ ret = devm_request_irq(&pdev->dev, bmc_device->irq, aspeed_bmc_dev_isr, 0, ++ dev_name(&pdev->dev), bmc_device); ++ if (ret) { ++ dev_err(dev, "aspeed bmc device Unable to get IRQ"); ++ goto out_unmap; ++ } ++ ++ ret = aspeed_bmc_device_setup_queue(pdev); ++ if (ret) { ++ dev_err(dev, "Cannot setup queue message"); ++ goto out_irq; ++ } ++ ++ ret = aspeed_bmc_device_setup_memory_mapping(pdev); ++ if (ret) { ++ dev_err(dev, "Cannot setup memory mapping misc"); ++ goto out_free_queue; ++ } ++ ++ if (of_property_read_bool(dev->of_node, "pcie2lpc")) ++ bmc_device->pcie2lpc = 1; ++ ++ ret = bmc_device->platform->init(pdev); ++ if (ret) { ++ dev_err(dev, "Initialize bmc device failed\n"); ++ goto out_free_misc; ++ } ++ ++ bmc_device->pcie_irq = platform_get_irq(pdev, 1); ++ if (bmc_device->pcie_irq < 0) { ++ dev_warn(&pdev->dev, ++ "platform get of pcie irq[=%d] failed!\n", bmc_device->pcie_irq); ++ } else { ++ ret = devm_request_irq(&pdev->dev, bmc_device->pcie_irq, ++ aspeed_bmc_dev_pcie_isr, IRQF_SHARED, ++ dev_name(&pdev->dev), bmc_device); ++ if (ret < 0) { ++ dev_warn(dev, "Failed to request PCI-E IRQ %d.\n", ret); ++ bmc_device->pcie_irq = -1; ++ } ++ } ++ ++ dev_info(dev, "aspeed bmc device: driver successfully loaded.\n"); ++ ++ return 0; ++ ++out_free_misc: ++ misc_deregister(&bmc_device->miscdev); ++out_free_queue: ++ for (i = 0; i < ASPEED_QUEUE_NUM; i++) ++ sysfs_remove_bin_file(&pdev->dev.kobj, &bmc_device->queue[i].bin); ++out_irq: ++ devm_free_irq(&pdev->dev, bmc_device->irq, bmc_device); ++out_unmap: ++ iounmap(bmc_device->reg_base); ++out_region: ++ devm_kfree(&pdev->dev, bmc_device); ++ dev_warn(dev, "aspeed bmc device: driver init failed (ret=%d)!\n", ret); ++ return ret; ++} ++ ++static void aspeed_bmc_device_remove(struct platform_device *pdev) ++{ ++ struct aspeed_bmc_device *bmc_device = platform_get_drvdata(pdev); ++ int i; ++ ++ for (i = 0; i < ASPEED_QUEUE_NUM; i++) ++ sysfs_remove_bin_file(&pdev->dev.kobj, &bmc_device->queue[i].bin); ++ misc_deregister(&bmc_device->miscdev); ++ devm_free_irq(&pdev->dev, bmc_device->irq, bmc_device); ++ devm_free_irq(&pdev->dev, bmc_device->pcie_irq, bmc_device); ++ ++ iounmap(bmc_device->reg_base); ++ ++ devm_kfree(&pdev->dev, bmc_device); ++} ++ ++static struct platform_driver aspeed_bmc_device_driver = { ++ .probe = aspeed_bmc_device_probe, ++ .remove = aspeed_bmc_device_remove, ++ .driver = { ++ .name = KBUILD_MODNAME, ++ .of_match_table = aspeed_bmc_device_of_matches, ++ }, ++}; ++ ++module_platform_driver(aspeed_bmc_device_driver); ++ ++MODULE_AUTHOR("Ryan Chen "); ++MODULE_DESCRIPTION("ASPEED BMC DEVICE Driver"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/soc/aspeed/aspeed-disp-intf.c b/drivers/soc/aspeed/aspeed-disp-intf.c +--- a/drivers/soc/aspeed/aspeed-disp-intf.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/soc/aspeed/aspeed-disp-intf.c 2025-12-23 10:16:21.124032669 +0000 +@@ -0,0 +1,255 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++// Copyright (C) ASPEED Technology Inc. 
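++//
++// This driver selects the video source routed to the VGA DAC and the DP
++// output through SCU pin-control registers and exposes the choice via two
++// sysfs attributes, dac_src and dp_src.  AST2600 uses one-bit fields in
++// SCU 0x0C0; AST2700 uses two-bit fields in SCU 0x414 and additionally
++// mirrors the selection into the D1PLL source mux depending on the chip
++// revision.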
++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define DEVICE_NAME "aspeed-disp-intf" ++ ++#define AST2700_SCU_CHIP_ID 0x0 ++#define SCU_CPU_REVISION_ID_HW GENMASK(23, 16) ++ ++#define AST2700_SCU_PIN_SEL 0x414 ++#define AST2700_SCU_D1PLL_SEL GENMASK(13, 12) ++#define AST2700_SCU_DAC_SRC_SEL GENMASK(11, 10) ++#define AST2700_SCU_DP_SRC_SEL GENMASK(9, 8) ++ ++#define AST2600_SCU_PIN_SEL 0x0C0 ++#define AST2600_SCU_DP_SRC_SEL BIT(18) ++#define AST2600_SCU_DAC_SRC_SEL BIT(16) ++ ++struct aspeed_disp_intf_config { ++ u8 version; ++ u32 dac_src_sel; ++ u32 dac_src_max; ++ u32 dac_src_min; ++ u32 dp_src_sel; ++ u32 dp_src_max; ++ u32 dp_src_min; ++}; ++ ++struct aspeed_disp_intf { ++ struct device *dev; ++ struct miscdevice miscdev; ++ struct regmap *scu; ++ const struct aspeed_disp_intf_config *config; ++}; ++ ++static int dac_src, dp_src; ++ ++static const struct aspeed_disp_intf_config ast2600_config = { ++ .version = 6, ++ .dac_src_sel = AST2600_SCU_PIN_SEL, ++ .dac_src_max = 1, ++ .dac_src_min = 0, ++ .dp_src_sel = AST2600_SCU_PIN_SEL, ++ .dp_src_max = 1, ++ .dp_src_min = 0, ++}; ++ ++static const struct aspeed_disp_intf_config ast2700_config = { ++ .version = 7, ++ .dac_src_sel = AST2700_SCU_PIN_SEL, ++ .dac_src_max = 2, ++ .dac_src_min = 0, ++ .dp_src_sel = AST2700_SCU_PIN_SEL, ++ .dp_src_max = 2, ++ .dp_src_min = 0, ++}; ++ ++static ssize_t dac_src_show(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ struct aspeed_disp_intf *intf = dev_get_drvdata(dev); ++ const struct aspeed_disp_intf_config *config = intf->config; ++ u32 val; ++ ++ regmap_read(intf->scu, config->dac_src_sel, &val); ++ dac_src = (config->version == 6) ++ ? FIELD_GET(AST2600_SCU_DAC_SRC_SEL, val) ++ : FIELD_GET(AST2700_SCU_DAC_SRC_SEL, val); ++ return sysfs_emit(buf, "%d\n", dac_src); ++} ++ ++static ssize_t dac_src_store(struct device *dev, ++ struct device_attribute *attr, const char *buf, size_t count) ++{ ++ struct aspeed_disp_intf *intf = dev_get_drvdata(dev); ++ const struct aspeed_disp_intf_config *config = intf->config; ++ int src, res; ++ ++ res = kstrtoint(buf, 0, &src); ++ if (res) ++ return res; ++ ++ if (src < config->dac_src_min || src > config->dac_src_max) { ++ dev_err(intf->dev, "Invalid dac_src(max:%d, min:%d)\n", ++ config->dac_src_max, config->dac_src_min); ++ return -1; ++ } ++ ++ dac_src = src; ++ if (config->version == 6) { ++ regmap_update_bits(intf->scu, config->dac_src_sel, AST2600_SCU_DAC_SRC_SEL, ++ FIELD_PREP(AST2600_SCU_DAC_SRC_SEL, src)); ++ } else { ++ u32 id; ++ u32 mask = AST2700_SCU_DAC_SRC_SEL; ++ u32 val = FIELD_PREP(AST2700_SCU_DAC_SRC_SEL, src); ++ ++ // D1PLL used in A0 only ++ regmap_read(intf->scu, AST2700_SCU_CHIP_ID, &id); ++ if (FIELD_GET(SCU_CPU_REVISION_ID_HW, id) != 0) { ++ mask |= AST2700_SCU_D1PLL_SEL; ++ val |= FIELD_PREP(AST2700_SCU_D1PLL_SEL, src); ++ } ++ ++ regmap_update_bits(intf->scu, config->dac_src_sel, mask, val); ++ } ++ return count; ++} ++ ++static DEVICE_ATTR_RW(dac_src); ++ ++static ssize_t dp_src_show(struct device *dev, struct device_attribute *attr, ++ char *buf) ++{ ++ struct aspeed_disp_intf *intf = dev_get_drvdata(dev); ++ const struct aspeed_disp_intf_config *config = intf->config; ++ u32 val; ++ ++ regmap_read(intf->scu, config->dp_src_sel, &val); ++ dp_src = (config->version == 6) ++ ? 
FIELD_GET(AST2600_SCU_DP_SRC_SEL, val) ++ : FIELD_GET(AST2700_SCU_DP_SRC_SEL, val); ++ return sysfs_emit(buf, "%d\n", dp_src); ++} ++ ++static ssize_t dp_src_store(struct device *dev, struct device_attribute *attr, ++ const char *buf, size_t count) ++{ ++ struct aspeed_disp_intf *intf = dev_get_drvdata(dev); ++ const struct aspeed_disp_intf_config *config = intf->config; ++ int src, res; ++ ++ res = kstrtoint(buf, 0, &src); ++ if (res) ++ return res; ++ ++ if (src < config->dp_src_min || src > config->dp_src_max) { ++ dev_err(intf->dev, "Invalid dp_src(max:%d, min:%d)\n", ++ config->dp_src_max, config->dp_src_min); ++ return -1; ++ } ++ ++ dp_src = src; ++ if (config->version == 6) { ++ regmap_update_bits(intf->scu, config->dp_src_sel, AST2600_SCU_DP_SRC_SEL, ++ FIELD_PREP(AST2600_SCU_DP_SRC_SEL, src)); ++ } else { ++ u32 val; ++ ++ regmap_update_bits(intf->scu, config->dp_src_sel, AST2700_SCU_DP_SRC_SEL, ++ FIELD_PREP(AST2700_SCU_DP_SRC_SEL, src)); ++ ++ // D1PLL used in A0 only ++ regmap_read(intf->scu, AST2700_SCU_CHIP_ID, &val); ++ if (FIELD_GET(SCU_CPU_REVISION_ID_HW, val) == 0) { ++ regmap_update_bits(intf->scu, config->dp_src_sel, AST2700_SCU_D1PLL_SEL, ++ FIELD_PREP(AST2700_SCU_D1PLL_SEL, src)); ++ } ++ } ++ ++ return count; ++} ++ ++static DEVICE_ATTR_RW(dp_src); ++ ++static struct attribute *aspeed_disp_intf_attrs[] = { ++ &dev_attr_dac_src.attr, ++ &dev_attr_dp_src.attr, ++ NULL, ++}; ++ ++static const struct attribute_group aspeed_disp_intf_attgrp = { ++ .name = NULL, ++ .attrs = aspeed_disp_intf_attrs, ++}; ++ ++static int aspeed_disp_intf_probe(struct platform_device *pdev) ++{ ++ struct aspeed_disp_intf *intf; ++ struct device *dev = &pdev->dev; ++ int ret; ++ ++ intf = devm_kzalloc(&pdev->dev, sizeof(struct aspeed_disp_intf), GFP_KERNEL); ++ if (!intf) ++ return -ENOMEM; ++ ++ dev_set_drvdata(&pdev->dev, intf); ++ ++ intf->config = of_device_get_match_data(&pdev->dev); ++ if (!intf->config) ++ return -ENODEV; ++ ++ intf->dev = dev; ++ intf->scu = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon"); ++ if (IS_ERR(intf->scu)) { ++ dev_err(dev, "failed to find SCU regmap\n"); ++ return PTR_ERR(intf->scu); ++ } ++ ++ intf->miscdev.minor = MISC_DYNAMIC_MINOR; ++ intf->miscdev.name = DEVICE_NAME; ++ intf->miscdev.parent = dev; ++ ret = misc_register(&intf->miscdev); ++ if (ret) { ++ dev_err(dev, "Unable to register device\n"); ++ return ret; ++ } ++ ++ ret = sysfs_create_group(&dev->kobj, &aspeed_disp_intf_attgrp); ++ if (ret != 0) ++ dev_warn(dev, "failed to register attributes\n"); ++ ++ return 0; ++} ++ ++static void aspeed_disp_intf_remove(struct platform_device *pdev) ++{ ++ struct aspeed_disp_intf *intf = platform_get_drvdata(pdev); ++ ++ sysfs_remove_group(&intf->dev->kobj, &aspeed_disp_intf_attgrp); ++ misc_deregister(&intf->miscdev); ++ devm_kfree(&pdev->dev, intf); ++} ++ ++static const struct of_device_id aspeed_disp_intf_of_matches[] = { ++ { .compatible = "aspeed,ast2600-disp-intf", .data = &ast2600_config }, ++ { .compatible = "aspeed,ast2700-disp-intf", .data = &ast2700_config }, ++ {}, ++}; ++ ++static struct platform_driver aspeed_disp_intf_driver = { ++ .probe = aspeed_disp_intf_probe, ++ .remove = aspeed_disp_intf_remove, ++ .driver = { ++ .name = DEVICE_NAME, ++ .of_match_table = aspeed_disp_intf_of_matches, ++ }, ++}; ++ ++module_platform_driver(aspeed_disp_intf_driver); ++ ++MODULE_DEVICE_TABLE(of, aspeed_disp_intf_of_matches); ++MODULE_AUTHOR("Jammy Huang "); ++MODULE_DESCRIPTION("ASPEED Display Interface Driver"); ++MODULE_LICENSE("GPL"); +diff --git 
a/drivers/soc/aspeed/aspeed-espi-comm.h b/drivers/soc/aspeed/aspeed-espi-comm.h +--- a/drivers/soc/aspeed/aspeed-espi-comm.h 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/soc/aspeed/aspeed-espi-comm.h 2025-12-23 10:16:21.124032669 +0000 +@@ -0,0 +1,206 @@ ++/* SPDX-License-Identifier: GPL-2.0+ */ ++/* ++ * Copyright 2023 Aspeed Technology Inc. ++ */ ++#ifndef __ASPEED_ESPI_COMM_H__ ++#define __ASPEED_ESPI_COMM_H__ ++ ++#include ++#include ++ ++/* ++ * eSPI cycle type encoding ++ * ++ * Section 5.1 Cycle Types and Packet Format, ++ * Intel eSPI Interface Base Specification, Rev 1.0, Jan. 2016. ++ */ ++#define ESPI_PERIF_MEMRD32 0x00 ++#define ESPI_PERIF_MEMRD64 0x02 ++#define ESPI_PERIF_MEMWR32 0x01 ++#define ESPI_PERIF_MEMWR64 0x03 ++#define ESPI_PERIF_MSG 0x10 ++#define ESPI_PERIF_MSG_D 0x11 ++#define ESPI_PERIF_SUC_CMPLT 0x06 ++#define ESPI_PERIF_SUC_CMPLT_D_MIDDLE 0x09 ++#define ESPI_PERIF_SUC_CMPLT_D_FIRST 0x0b ++#define ESPI_PERIF_SUC_CMPLT_D_LAST 0x0d ++#define ESPI_PERIF_SUC_CMPLT_D_ONLY 0x0f ++#define ESPI_PERIF_UNSUC_CMPLT 0x0c ++#define ESPI_OOB_MSG 0x21 ++#define ESPI_FLASH_READ 0x00 ++#define ESPI_FLASH_WRITE 0x01 ++#define ESPI_FLASH_ERASE 0x02 ++#define ESPI_FLASH_SUC_CMPLT 0x06 ++#define ESPI_FLASH_SUC_CMPLT_D_MIDDLE 0x09 ++#define ESPI_FLASH_SUC_CMPLT_D_FIRST 0x0b ++#define ESPI_FLASH_SUC_CMPLT_D_LAST 0x0d ++#define ESPI_FLASH_SUC_CMPLT_D_ONLY 0x0f ++#define ESPI_FLASH_UNSUC_CMPLT 0x0c ++ ++/* ++ * eSPI packet format structure ++ * ++ * Section 5.1 Cycle Types and Packet Format, ++ * Intel eSPI Interface Base Specification, Rev 1.0, Jan. 2016. ++ */ ++struct espi_comm_hdr { ++ uint8_t cyc; ++ uint8_t len_h : 4; ++ uint8_t tag : 4; ++ uint8_t len_l; ++}; ++ ++struct espi_perif_mem32 { ++ uint8_t cyc; ++ uint8_t len_h : 4; ++ uint8_t tag : 4; ++ uint8_t len_l; ++ uint32_t addr_be; ++ uint8_t data[]; ++} __packed; ++ ++struct espi_perif_mem64 { ++ uint8_t cyc; ++ uint8_t len_h : 4; ++ uint8_t tag : 4; ++ uint8_t len_l; ++ uint32_t addr_be; ++ uint8_t data[]; ++} __packed; ++ ++struct espi_perif_msg { ++ uint8_t cyc; ++ uint8_t len_h : 4; ++ uint8_t tag : 4; ++ uint8_t len_l; ++ uint8_t msg_code; ++ uint8_t msg_byte[4]; ++ uint8_t data[]; ++} __packed; ++ ++struct espi_perif_cmplt { ++ uint8_t cyc; ++ uint8_t len_h : 4; ++ uint8_t tag : 4; ++ uint8_t len_l; ++ uint8_t data[]; ++} __packed; ++ ++struct espi_oob_msg { ++ uint8_t cyc; ++ uint8_t len_h : 4; ++ uint8_t tag : 4; ++ uint8_t len_l; ++ uint8_t data[]; ++}; ++ ++struct espi_flash_rwe { ++ uint8_t cyc; ++ uint8_t len_h : 4; ++ uint8_t tag : 4; ++ uint8_t len_l; ++ uint32_t addr_be; ++ uint8_t data[]; ++} __packed; ++ ++struct espi_flash_cmplt { ++ uint8_t cyc; ++ uint8_t len_h : 4; ++ uint8_t tag : 4; ++ uint8_t len_l; ++ uint8_t data[]; ++} __packed; ++ ++#define ESPI_MAX_PLD_LEN BIT(12) ++ ++/* ++ * Aspeed IOCTL for eSPI raw packet send/receive ++ * ++ * This IOCTL interface works in the eSPI packet in/out paradigm. ++ * ++ * Only the virtual wire IOCTL is a special case which does not send ++ * or receive an eSPI packet. However, to keep a more consisten use from ++ * userspace, we make all of the four channel drivers serve through the ++ * IOCTL interface. ++ * ++ * For the eSPI packet format, refer to ++ * Section 5.1 Cycle Types and Packet Format, ++ * Intel eSPI Interface Base Specification, Rev 1.0, Jan. 2016. 
++ * ++ * For the example user apps using these IOCTL, refer to ++ * https://github.com/AspeedTech-BMC/aspeed_app/tree/master/espi_test ++ */ ++#define __ASPEED_ESPI_IOCTL_MAGIC 0xb8 ++ ++/* ++ * we choose the longest header and the max payload size ++ * based on the Intel specification to define the maximum ++ * eSPI packet length ++ */ ++#define ESPI_MAX_PKT_LEN (sizeof(struct espi_perif_msg) + ESPI_MAX_PLD_LEN) ++ ++struct aspeed_espi_ioc { ++ uint32_t pkt_len; ++ uint8_t *pkt; ++}; ++ ++/* ++ * Peripheral Channel (CH0) ++ * - ASPEED_ESPI_PERIF_PC_GET_RX ++ * Receive an eSPI Posted/Completion packet ++ * - ASPEED_ESPI_PERIF_PC_PUT_TX ++ * Transmit an eSPI Posted/Completion packet ++ * - ASPEED_ESPI_PERIF_NP_PUT_TX ++ * Transmit an eSPI Non-Posted packet ++ */ ++#define ASPEED_ESPI_PERIF_PC_GET_RX _IOR(__ASPEED_ESPI_IOCTL_MAGIC, \ ++ 0x00, struct aspeed_espi_ioc) ++#define ASPEED_ESPI_PERIF_PC_PUT_TX _IOW(__ASPEED_ESPI_IOCTL_MAGIC, \ ++ 0x01, struct aspeed_espi_ioc) ++#define ASPEED_ESPI_PERIF_NP_PUT_TX _IOW(__ASPEED_ESPI_IOCTL_MAGIC, \ ++ 0x02, struct aspeed_espi_ioc) ++/* ++ * Virtual Wire Channel (CH1) ++ * - ASPEED_ESPI_VW_GET_GPIO_VAL ++ * Read the input value of GPIO over the VW channel ++ * - ASPEED_ESPI_VW_PUT_GPIO_VAL ++ * Write the output value of GPIO over the VW channel ++ * - ASPEED_ESPI_VW_GET_GPIO_VAL1 (new feature in AST2700) ++ * Read the input value1 of GPIO over the VW channel ++ * - ASPEED_ESPI_VW_PUT_GPIO_VAL1 (new feature in AST2700) ++ * Write the output value1 of GPIO over the VW channel ++ */ ++#define ASPEED_ESPI_VW_GET_GPIO_VAL _IOR(__ASPEED_ESPI_IOCTL_MAGIC, \ ++ 0x10, uint32_t) ++#define ASPEED_ESPI_VW_PUT_GPIO_VAL _IOW(__ASPEED_ESPI_IOCTL_MAGIC, \ ++ 0x11, uint32_t) ++#ifdef CONFIG_ARM64 ++#define ASPEED_ESPI_VW_GET_GPIO_VAL1 _IOR(__ASPEED_ESPI_IOCTL_MAGIC, \ ++ 0x12, uint32_t) ++#define ASPEED_ESPI_VW_PUT_GPIO_VAL1 _IOW(__ASPEED_ESPI_IOCTL_MAGIC, \ ++ 0x13, uint32_t) ++#endif ++/* ++ * Out-of-band Channel (CH2) ++ * - ASPEED_ESPI_OOB_GET_RX ++ * Receive an eSPI OOB packet ++ * - ASPEED_ESPI_OOB_PUT_TX ++ * Transmit an eSPI OOB packet ++ */ ++#define ASPEED_ESPI_OOB_GET_RX _IOR(__ASPEED_ESPI_IOCTL_MAGIC, \ ++ 0x20, struct aspeed_espi_ioc) ++#define ASPEED_ESPI_OOB_PUT_TX _IOW(__ASPEED_ESPI_IOCTL_MAGIC, \ ++ 0x21, struct aspeed_espi_ioc) ++/* ++ * Flash Channel (CH3) ++ * - ASPEED_ESPI_FLASH_GET_RX ++ * Receive an eSPI flash packet ++ * - ASPEED_ESPI_FLASH_PUT_TX ++ * Transmit an eSPI flash packet ++ */ ++#define ASPEED_ESPI_FLASH_GET_RX _IOR(__ASPEED_ESPI_IOCTL_MAGIC, \ ++ 0x30, struct aspeed_espi_ioc) ++#define ASPEED_ESPI_FLASH_PUT_TX _IOW(__ASPEED_ESPI_IOCTL_MAGIC, \ ++ 0x31, struct aspeed_espi_ioc) ++ ++#endif +diff --git a/drivers/soc/aspeed/aspeed-host-bmc-dev.c b/drivers/soc/aspeed/aspeed-host-bmc-dev.c +--- a/drivers/soc/aspeed/aspeed-host-bmc-dev.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/soc/aspeed/aspeed-host-bmc-dev.c 2025-12-23 10:16:21.124032669 +0000 +@@ -0,0 +1,1435 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++// Copyright (C) ASPEED Technology Inc. 
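++//
++// Host-side PCIe driver for the ASPEED BMC device function.  It maps the
++// shared-memory BAR and the message BAR, exposes the two doorbell queues
++// as 4-byte sysfs bin attributes (pci-bmc-dev-queue1/2), registers the
++// PCIe VUARTs and the SIO mailbox interrupt, and, on AST2700, attaches
++// the MMBI circular buffers to mctpmmbiN network devices for MCTP.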
++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++ ++#include "aspeed-pcie-mmbi.h" ++ ++#define PCI_BMC_HOST2BMC_Q1 0x30000 ++#define PCI_BMC_HOST2BMC_Q2 0x30010 ++#define PCI_BMC_BMC2HOST_Q1 0x30020 ++#define PCI_BMC_BMC2HOST_Q2 0x30030 ++#define PCI_BMC_BMC2HOST_STS 0x30040 ++#define BMC2HOST_INT_STS_DOORBELL BIT(31) ++#define BMC2HOST_ENABLE_INTB BIT(30) ++ ++#define BMC2HOST_Q1_FULL BIT(27) ++#define BMC2HOST_Q1_EMPTY BIT(26) ++#define BMC2HOST_Q2_FULL BIT(25) ++#define BMC2HOST_Q2_EMPTY BIT(24) ++#define BMC2HOST_Q1_FULL_UNMASK BIT(23) ++#define BMC2HOST_Q1_EMPTY_UNMASK BIT(22) ++#define BMC2HOST_Q2_FULL_UNMASK BIT(21) ++#define BMC2HOST_Q2_EMPTY_UNMASK BIT(20) ++ ++#define PCI_BMC_HOST2BMC_STS 0x30044 ++#define HOST2BMC_INT_STS_DOORBELL BIT(31) ++#define HOST2BMC_ENABLE_INTB BIT(30) ++ ++#define HOST2BMC_Q1_FULL BIT(27) ++#define HOST2BMC_Q1_EMPTY BIT(26) ++#define HOST2BMC_Q2_FULL BIT(25) ++#define HOST2BMC_Q2_EMPTY BIT(24) ++#define HOST2BMC_Q1_FULL_UNMASK BIT(23) ++#define HOST2BMC_Q1_EMPTY_UNMASK BIT(22) ++#define HOST2BMC_Q2_FULL_UNMASK BIT(21) ++#define HOST2BMC_Q2_EMPTY_UNMASK BIT(20) ++ ++static DEFINE_IDA(bmc_device_ida); ++ ++#define MMBI_MAX_INST 6 ++#define VUART_MAX_PARMS 2 ++#define ASPEED_QUEUE_NUM 2 ++#define MAX_MSI_NUM 8 ++ ++enum aspeed_platform_id { ++ ASPEED, ++ ASPEED_AST2700_SOC1, ++}; ++ ++enum queue_index { ++ QUEUE1 = 0, ++ QUEUE2, ++}; ++ ++enum msi_index { ++ BMC_MSI, ++ MBX_MSI, ++ VUART0_MSI, ++ VUART1_MSI, ++ MMBI0_MSI, ++ MMBI1_MSI, ++ MMBI2_MSI, ++ MMBI3_MSI, ++}; ++ ++/* Match msi_index */ ++static int ast2600_msi_idx_table[MAX_MSI_NUM] = { 4, 21, 16, 15 }; ++static int ast2700_soc0_msi_idx_table[MAX_MSI_NUM] = { 0, 11, 6, 5, 28, 29, 30, 31 }; ++/* ARRAY = MMIB0_MSI, MMBI1_MSI, MMBI2_MSI, MMBI3_MSI, MMBI4_MSI, MMBI5_MSI */ ++static int ast2700_soc1_msi_idx_table[MAX_MSI_NUM] = { 1, 2, 3, 4, 5, 6 }; ++ ++struct aspeed_platform { ++ int (*setup)(struct pci_dev *pdev); ++}; ++ ++struct aspeed_queue_message { ++ /* Queue waiters for idle engine */ ++ wait_queue_head_t tx_wait; ++ wait_queue_head_t rx_wait; ++ struct kernfs_node *kn; ++ struct bin_attribute bin; ++ int index; ++ struct aspeed_pci_bmc_dev *pci_bmc_device; ++}; ++ ++struct aspeed_pcie_mmbi { ++ resource_size_t base; ++ resource_size_t mem_size; ++ void __iomem *mem; ++ u32 segment_size; ++ int irq; ++ int id; ++ struct aspeed_mmbi_channel chan; ++ const char *dev_name; ++}; ++ ++struct aspeed_pci_bmc_dev { ++ struct device *dev; ++ struct miscdevice miscdev; ++ struct aspeed_platform *platform; ++ kernel_ulong_t driver_data; ++ int id; ++ ++ unsigned long mem_bar_base; ++ unsigned long mem_bar_size; ++ void __iomem *mem_bar_reg; ++ ++ unsigned long message_bar_base; ++ unsigned long message_bar_size; ++ void __iomem *msg_bar_reg; ++ ++ void __iomem *pcie_sio_decode_addr; ++ ++ struct aspeed_queue_message queue[ASPEED_QUEUE_NUM]; ++ ++ void __iomem *sio_mbox_reg; ++ struct uart_8250_port uart[VUART_MAX_PARMS]; ++ int uart_line[VUART_MAX_PARMS]; ++ ++ /* Interrupt ++ * The index of array is using to enum msi_index ++ */ ++ int *msi_idx_table; ++ ++ bool ast2700_soc1; ++ ++ /* AST2700 MMBI */ ++ struct aspeed_pcie_mmbi mmbi[MMBI_MAX_INST]; ++ int mmbi_start_msi; ++}; ++ ++#define PCIE_DEVICE_SIO_ADDR (0x2E * 4) ++#define BMC_MULTI_MSI 32 ++ ++#define DRIVER_NAME "aspeed-host-bmc-dev" ++ ++static 
int mmbi_desc_init(struct aspeed_mmbi_channel *chan); ++ ++static u8 mmbi_get_bmc_rdy(struct aspeed_mmbi_channel *chan) ++{ ++ struct host_ros hros; ++ ++ memcpy_fromio(&hros, chan->hros_vmem, sizeof(hros)); ++ ++ return hros.b_rdy; ++} ++ ++static u8 mmbi_get_bmc_up(struct aspeed_mmbi_channel *chan) ++{ ++ struct host_ros hros; ++ ++ memcpy_fromio(&hros, chan->hros_vmem, sizeof(hros)); ++ ++ return hros.b_up; ++} ++ ++static u8 mmbi_get_bmc_rst(struct aspeed_mmbi_channel *chan) ++{ ++ struct host_ros hros; ++ ++ memcpy_fromio(&hros, chan->hros_vmem, sizeof(hros)); ++ ++ return hros.b_rst; ++} ++ ++static u8 mmbi_get_host_rst(struct aspeed_mmbi_channel *chan) ++{ ++ struct host_rws hrws; ++ ++ memcpy_fromio(&hrws, chan->hrws_vmem, sizeof(hrws)); ++ ++ return hrws.h_rst; ++} ++ ++static u8 mmbi_get_host_up(struct aspeed_mmbi_channel *chan) ++{ ++ struct host_rws hrws; ++ ++ memcpy_fromio(&hrws, chan->hrws_vmem, sizeof(hrws)); ++ ++ return hrws.h_up; ++} ++ ++static void mmbi_set_host_rst(struct aspeed_mmbi_channel *chan, bool set) ++{ ++ struct host_rws hrws; ++ ++ memcpy_fromio(&hrws, chan->hrws_vmem, sizeof(hrws)); ++ hrws.h_rst = set; ++ memcpy_toio(chan->hrws_vmem, &hrws, sizeof(hrws)); ++} ++ ++static void mmbi_set_host_rdy(struct aspeed_mmbi_channel *chan, bool set) ++{ ++ struct host_rws hrws; ++ ++ memcpy_fromio(&hrws, chan->hrws_vmem, sizeof(hrws)); ++ hrws.h_rdy = set; ++ memcpy_toio(chan->hrws_vmem, &hrws, sizeof(hrws)); ++} ++ ++static void mmbi_set_host_up(struct aspeed_mmbi_channel *chan, bool set) ++{ ++ struct host_rws hrws; ++ ++ memcpy_fromio(&hrws, chan->hrws_vmem, sizeof(hrws)); ++ hrws.h_up = set; ++ memcpy_toio(chan->hrws_vmem, &hrws, sizeof(hrws)); ++} ++ ++static void get_h2b_avail_buf_len(struct aspeed_mmbi_channel *chan, ssize_t *avail_buf_len) ++{ ++ struct device *dev = chan->dev; ++ u32 h2b_rp, h2b_wp; ++ ++ h2b_rp = GET_H2B_READ_POINTER(chan); ++ h2b_wp = GET_H2B_WRITE_POINTER(chan); ++ dev_dbg(dev, "MMBI HRWS - h2b_rp: 0x%0x, h2b_wp: 0x%0x\n", h2b_rp, h2b_wp); ++ ++ if (h2b_wp >= h2b_rp) ++ *avail_buf_len = chan->h2b_cb_size - h2b_wp + h2b_rp; ++ else ++ *avail_buf_len = h2b_rp - h2b_wp; ++} ++ ++static u8 mmbi_get_state(struct aspeed_mmbi_channel *chan) ++{ ++ u8 state = 0; ++ ++ state = mmbi_get_bmc_up(chan) << 3; ++ state |= mmbi_get_bmc_rst(chan) << 2; ++ state |= mmbi_get_host_up(chan) << 1; ++ state |= mmbi_get_host_rst(chan); ++ ++ dev_dbg(chan->dev, "MMBI state: 0x%x\n", state); ++ ++ return state; ++} ++ ++static void raise_h2b_interrupt(struct aspeed_mmbi_channel *chan) ++{ ++ if (!chan->bmc_int_en) ++ return; ++ ++ writeb(chan->bmc_int_value, chan->desc_vmem + chan->bmc_int_location); ++} ++ ++static int mmbi_state_check(struct aspeed_mmbi_channel *chan) ++{ ++ enum mmbi_state current_state = mmbi_get_state(chan); ++ struct device *dev = chan->dev; ++ int ret; ++ ++ switch (current_state) { ++ case INIT_COMPLETED: ++ dev_dbg(dev, "Get INIT_COMPLETED state from BMC"); ++ ++ ret = mmbi_desc_init(chan); ++ if (ret) { ++ dev_warn(dev, "Check MMBI signature timeout\n"); ++ raise_h2b_interrupt(chan); ++ } ++ return 1; ++ case RESET_REQ_BY_BMC: ++ dev_dbg(dev, "Get RESET_REQ_BY_BMC state from BMC"); ++ ++ /* Change state to RESET_ACKED */ ++ mmbi_set_host_rst(chan, 1); ++ ++ dev_dbg(dev, "Change state to RESET_ACKED to BMC"); ++ raise_h2b_interrupt(chan); ++ default: ++ break; ++ } ++ ++ return 0; ++} ++ ++static int mmbi_bmc_up_check(struct aspeed_mmbi_channel *chan) ++{ ++ u64 __timeout_us = 1000; ++ ktime_t __timeout = ktime_add_us(ktime_get(), 
__timeout_us); ++ ++ for (;;) { ++ enum mmbi_state current_state = mmbi_get_state(chan); ++ ++ if (current_state == INIT_COMPLETED) ++ break; ++ if (__timeout_us && ktime_compare(ktime_get(), __timeout) > 0) ++ return -EAGAIN; ++ } ++ ++ return 0; ++} ++ ++static void update_host_rws(struct aspeed_mmbi_channel *chan, unsigned int w_len, ++ unsigned int r_len) ++{ ++ struct device *dev = chan->dev; ++ struct host_rws hrws; ++ u32 h2b_wp, b2h_rp; ++ ++ h2b_wp = GET_H2B_WRITE_POINTER(chan); ++ b2h_rp = GET_B2H_READ_POINTER(chan); ++ ++ dev_dbg(dev, "MMBI HRWS - b2h_rp: 0x%0x, h2b_wp: 0x%0x\n", b2h_rp, h2b_wp); ++ ++ /* Advance the H2B CB offset for next write */ ++ if ((h2b_wp + w_len) <= chan->h2b_cb_size) ++ h2b_wp += w_len; ++ else ++ h2b_wp = h2b_wp + w_len - chan->h2b_cb_size; ++ ++ /* Advance the B2H CB offset till where BMC read data */ ++ if ((b2h_rp + r_len) <= chan->b2h_cb_size) ++ b2h_rp += r_len; ++ else ++ b2h_rp = b2h_rp + r_len - chan->b2h_cb_size; ++ ++ memcpy_fromio(&hrws, chan->hrws_vmem, sizeof(hrws)); ++ ++ hrws.h2b_wp = FIELD_GET(H2B_WRITE_POINTER_MASK, h2b_wp); ++ hrws.b2h_rp = FIELD_GET(B2H_READ_POINTER_MASK, b2h_rp); ++ memcpy_toio(chan->hrws_vmem, &hrws, sizeof(hrws)); ++ dev_dbg(dev, "Updating HRWS - b2h_rp: 0x%0x, h2b_wp: 0x%0x\n", b2h_rp, h2b_wp); ++ ++ if (w_len != 0) ++ raise_h2b_interrupt(chan); ++} ++ ++static int get_mmbi_header(struct aspeed_mmbi_channel *chan, u32 *data_length, u8 *type, ++ u32 *unread_data_len, u8 *padding) ++{ ++ u32 h2b_wp, h2b_rp, b2h_wp, b2h_rp; ++ struct mmbi_header header; ++ ++ h2b_wp = GET_H2B_WRITE_POINTER(chan); ++ h2b_rp = GET_H2B_READ_POINTER(chan); ++ b2h_wp = GET_B2H_WRITE_POINTER(chan); ++ b2h_rp = GET_B2H_READ_POINTER(chan); ++ dev_dbg(chan->dev, "MMBI HRWS - h2b_wp: 0x%0x, b2h_rp: 0x%0x\n", h2b_wp, b2h_rp); ++ dev_dbg(chan->dev, "MMBI HROS - b2h_wp: 0x%0x, h2b_rp: 0x%0x\n", b2h_wp, h2b_rp); ++ ++ if (b2h_wp >= b2h_rp) ++ *unread_data_len = b2h_wp - b2h_rp; ++ else ++ *unread_data_len = chan->b2h_cb_size - b2h_rp + b2h_wp; ++ ++ if (*unread_data_len < sizeof(struct mmbi_header)) { ++ dev_dbg(chan->dev, "No data to read(%d - %d)\n", b2h_wp, b2h_rp); ++ return -EAGAIN; ++ } ++ ++ dev_dbg(chan->dev, "READ MMBI header from: %p\n", chan->b2h_cb_vmem + b2h_rp); ++ ++ /* Extract MMBI protocol - protocol type and length */ ++ if ((b2h_rp + sizeof(header)) <= chan->b2h_cb_size) { ++ memcpy_fromio(&header, chan->b2h_cb_vmem + b2h_rp, sizeof(header)); ++ } else { ++ ssize_t chunk_len = chan->b2h_cb_size - b2h_rp; ++ ++ memcpy_fromio(&header, chan->b2h_cb_vmem + b2h_rp, chunk_len); ++ memcpy_fromio(((u8 *)&header) + chunk_len, chan->b2h_cb_vmem, ++ sizeof(header) - chunk_len); ++ } ++ ++ *data_length = (header.pkt_len << 2) - sizeof(header) - header.pkt_pad; ++ *padding = header.pkt_pad; ++ *type = header.pkt_type; ++ ++ return 0; ++} ++ ++static int aspeed_mmbi_write(struct aspeed_mmbi_channel *chan, const char *buffer, size_t len, ++ protocol_type type) ++{ ++ struct device *dev = chan->dev; ++ struct mmbi_header header = {0}; ++ ssize_t avail_buf_len; ++ ssize_t total_len; ++ ssize_t wt_offset; ++ ssize_t chunk_len; ++ ssize_t end_offset; ++ u8 padding = 0; ++ ++ /* If BMC READY bit is not set, Just discard the write. 
*/ ++ if (!GET_BMC_READY_BIT(chan)) { ++ dev_dbg(dev, "Host not ready, discarding request...\n"); ++ return -EAGAIN; ++ } ++ ++ get_h2b_avail_buf_len(chan, &avail_buf_len); ++ ++ dev_dbg(dev, "H2B buffer empty space: %zd\n", avail_buf_len); ++ ++ /* Header size */ ++ total_len = len + 4; ++ ++ padding = total_len & 0x3; ++ if (padding) ++ padding = 4 - padding; ++ total_len += padding; ++ ++ /* Empty space should be more than write request data size */ ++ if (avail_buf_len <= sizeof(header) || (total_len > (avail_buf_len - sizeof(header)))) ++ return -ENOSPC; ++ ++ /* Fill multi-protocol header */ ++ header.pkt_type = type; ++ header.pkt_len = total_len >> 2; ++ header.pkt_pad = padding; ++ ++ wt_offset = GET_H2B_WRITE_POINTER(chan); ++ end_offset = chan->h2b_cb_size; ++ ++ /* Copy Header */ ++ if ((end_offset - wt_offset) >= sizeof(header)) { ++ memcpy_toio(chan->h2b_cb_vmem + wt_offset, &header, sizeof(header)); ++ wt_offset += sizeof(header); ++ } else { ++ chunk_len = end_offset - wt_offset; ++ dev_dbg(dev, "Write header chunk_len: %zd\n", chunk_len); ++ memcpy_toio(chan->h2b_cb_vmem + wt_offset, &header, chunk_len); ++ memcpy_toio(chan->h2b_cb_vmem, (u8 *)&header + chunk_len, ++ (sizeof(header) - chunk_len)); ++ wt_offset = (sizeof(header) - chunk_len); ++ } ++ ++ /* Write the data */ ++ if ((end_offset - wt_offset) >= len) { ++ memcpy_toio(&chan->h2b_cb_vmem[wt_offset], buffer, len); ++ wt_offset += len; ++ } else { ++ chunk_len = end_offset - wt_offset; ++ dev_dbg(dev, "Write data chunk_len: %zd\n", chunk_len); ++ memcpy_toio(&chan->h2b_cb_vmem[wt_offset], buffer, chunk_len); ++ wt_offset = 0; ++ memcpy_toio(&chan->h2b_cb_vmem[wt_offset], buffer + chunk_len, len - chunk_len); ++ wt_offset += len - chunk_len; ++ } ++ ++ update_host_rws(chan, total_len, 0); ++ ++ return 0; ++} ++ ++static void aspeed_mmbi_read(struct aspeed_mmbi_channel *chan, char *buffer, size_t len, u8 padding) ++{ ++ struct device *dev = chan->dev; ++ ssize_t rd_offset; ++ u32 b2h_rp; ++ ++ b2h_rp = GET_B2H_READ_POINTER(chan); ++ if ((b2h_rp + sizeof(struct mmbi_header)) <= chan->b2h_cb_size) ++ rd_offset = b2h_rp + sizeof(struct mmbi_header); ++ else ++ rd_offset = b2h_rp + sizeof(struct mmbi_header) - chan->b2h_cb_size; ++ ++ /* Extract data and copy to user space application */ ++ dev_dbg(dev, "READ MMBI Data from: %p and length: %zd\n", ++ chan->b2h_cb_vmem + rd_offset, len); ++ ++ if ((chan->b2h_cb_size - rd_offset) >= len) { ++ memcpy_fromio(buffer, chan->b2h_cb_vmem + rd_offset, len); ++ rd_offset += len; ++ } else { ++ ssize_t chunk_len; ++ ++ chunk_len = chan->b2h_cb_size - rd_offset; ++ dev_dbg(dev, "Read data chunk_len: %zd\n", chunk_len); ++ memcpy_fromio(buffer, chan->b2h_cb_vmem + rd_offset, chunk_len); ++ ++ rd_offset = 0; ++ memcpy_fromio(buffer + chunk_len, chan->b2h_cb_vmem + rd_offset, ++ len - chunk_len); ++ } ++ ++ update_host_rws(chan, 0, len + sizeof(struct mmbi_header) + padding); ++} ++ ++static void mctp_mmbi_rx(struct aspeed_mmbi_channel *chan) ++{ ++ struct net_device *ndev; ++ struct sk_buff *skb; ++ struct mctp_skb_cb *cb; ++ u32 req_data_len, unread_data_len; ++ u8 type, padding; ++ int status; ++ ++ if (get_mmbi_header(chan, &req_data_len, &type, &unread_data_len, &padding) != 0) ++ return; ++ ++ dev_dbg(chan->dev, "%s: Length: 0x%0x, Protocol Type: %d, Unread data: %d\n", __func__, ++ req_data_len, type, unread_data_len); ++ ++ ndev = chan->ndev; ++ ++ skb = netdev_alloc_skb(ndev, req_data_len); ++ if (!skb) { ++ ndev->stats.rx_dropped++; ++ update_host_rws(chan, 0, req_data_len + 
sizeof(struct mmbi_header)); ++ return; ++ } ++ ++ skb->protocol = htons(ETH_P_MCTP); ++ aspeed_mmbi_read(chan, skb_put(skb, req_data_len), req_data_len, padding); ++ skb_reset_network_header(skb); ++ ++ cb = __mctp_cb(skb); ++ cb->halen = 0; ++ ++ status = netif_rx(skb); ++ if (status == NET_RX_SUCCESS) { ++ ndev->stats.rx_packets++; ++ ndev->stats.rx_bytes += req_data_len; ++ } else { ++ ndev->stats.rx_dropped++; ++ } ++} ++ ++static netdev_tx_t mctp_mmbi_tx(struct sk_buff *skb, struct net_device *ndev) ++{ ++ struct aspeed_mmbi_mctp *mctp = netdev_priv(ndev); ++ int ret; ++ ++ if (!mmbi_get_bmc_rdy(&mctp->mmbi->chan) || skb->len > MCTP_MMBI_MTU_MAX) { ++ ndev->stats.tx_dropped++; ++ goto out; ++ } ++ ++ ret = aspeed_mmbi_write(&mctp->mmbi->chan, skb->data, skb->len, MMBI_PROTOCOL_MCTP); ++ if (ret) { ++ netif_stop_queue(ndev); ++ return NETDEV_TX_BUSY; ++ } ++ ++ ndev->stats.tx_packets++; ++ ndev->stats.tx_bytes += skb->len; ++out: ++ kfree_skb(skb); ++ return NETDEV_TX_OK; ++} ++ ++static const struct net_device_ops mctp_mmbi_netdev_ops = { ++ .ndo_start_xmit = mctp_mmbi_tx, ++}; ++ ++static void aspeed_mctp_mmbi_setup(struct net_device *ndev) ++{ ++ ndev->type = ARPHRD_MCTP; ++ ++ /* we limit at the fixed MTU, which is also the MCTP-standard ++ * baseline MTU, so is also our minimum ++ */ ++ ndev->mtu = MCTP_MMBI_MTU; ++ ndev->max_mtu = MCTP_MMBI_MTU_MAX; ++ ndev->min_mtu = MCTP_MMBI_MTU_MIN; ++ ++ ndev->hard_header_len = 0; ++ ndev->addr_len = 0; ++ ndev->tx_queue_len = DEFAULT_TX_QUEUE_LEN; ++ ndev->flags = IFF_NOARP; ++ ndev->netdev_ops = &mctp_mmbi_netdev_ops; ++ ndev->needs_free_netdev = true; ++} ++ ++static int aspeed_mmbi_mctp_init(struct aspeed_mmbi_channel *chan) ++{ ++ struct aspeed_mmbi_mctp *mctp; ++ struct net_device *ndev; ++ char name[32]; ++ int ret; ++ ++ snprintf(name, sizeof(name), "mctpmmbi%d", chan->mmbi->id); ++ ndev = alloc_netdev(sizeof(*mctp), name, NET_NAME_ENUM, aspeed_mctp_mmbi_setup); ++ if (!ndev) ++ return -ENOMEM; ++ mctp = netdev_priv(ndev); ++ mctp->ndev = ndev; ++ mctp->mmbi = chan->mmbi; ++ ++ chan->ndev = ndev; ++ ++ ret = register_netdev(ndev); ++ if (ret) ++ goto free_netdev; ++ ++ return 0; ++ ++free_netdev: ++ free_netdev(ndev); ++ ++ return ret; ++} ++ ++static struct aspeed_pci_bmc_dev *file_aspeed_bmc_device(struct file *file) ++{ ++ return container_of(file->private_data, struct aspeed_pci_bmc_dev, miscdev); ++} ++ ++static int aspeed_pci_bmc_dev_mmap(struct file *file, struct vm_area_struct *vma) ++{ ++ struct aspeed_pci_bmc_dev *pci_bmc_dev = file_aspeed_bmc_device(file); ++ unsigned long vsize = vma->vm_end - vma->vm_start; ++ pgprot_t prot = vma->vm_page_prot; ++ ++ if (vma->vm_pgoff + vsize > pci_bmc_dev->mem_bar_base + 0x100000) ++ return -EINVAL; ++ ++ prot = pgprot_noncached(prot); ++ ++ if (remap_pfn_range(vma, vma->vm_start, ++ (pci_bmc_dev->mem_bar_base >> PAGE_SHIFT) + vma->vm_pgoff, ++ vsize, prot)) ++ return -EAGAIN; ++ ++ return 0; ++} ++ ++static const struct file_operations aspeed_pci_bmc_dev_fops = { ++ .owner = THIS_MODULE, ++ .mmap = aspeed_pci_bmc_dev_mmap, ++}; ++ ++static ssize_t aspeed_queue_rx(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, ++ char *buf, loff_t off, size_t count) ++{ ++ struct aspeed_queue_message *queue = attr->private; ++ struct aspeed_pci_bmc_dev *pci_bmc_device = queue->pci_bmc_device; ++ int index = queue->index; ++ u32 *data = (u32 *)buf; ++ int ret; ++ ++ ret = wait_event_interruptible(queue->rx_wait, ++ !(readl(pci_bmc_device->msg_bar_reg + PCI_BMC_BMC2HOST_STS) & ++ 
((index == QUEUE1) ? BMC2HOST_Q1_EMPTY : BMC2HOST_Q2_EMPTY))); ++ if (ret) ++ return -EINTR; ++ ++ data[0] = readl(pci_bmc_device->msg_bar_reg + ++ ((index == QUEUE1) ? PCI_BMC_BMC2HOST_Q1 : PCI_BMC_BMC2HOST_Q2)); ++ ++ writel(HOST2BMC_INT_STS_DOORBELL | HOST2BMC_ENABLE_INTB, ++ pci_bmc_device->msg_bar_reg + PCI_BMC_HOST2BMC_STS); ++ ++ return sizeof(u32); ++} ++ ++static ssize_t aspeed_queue_tx(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, ++ char *buf, loff_t off, size_t count) ++{ ++ struct aspeed_queue_message *queue = attr->private; ++ struct aspeed_pci_bmc_dev *pci_bmc_device = queue->pci_bmc_device; ++ int index = queue->index; ++ u32 tx_buff; ++ int ret; ++ ++ if (count != sizeof(u32)) ++ return -EINVAL; ++ ++ ret = wait_event_interruptible(queue->tx_wait, ++ !(readl(pci_bmc_device->msg_bar_reg + PCI_BMC_HOST2BMC_STS) & ++ ((index == QUEUE1) ? HOST2BMC_Q1_FULL : HOST2BMC_Q2_FULL))); ++ if (ret) ++ return -EINTR; ++ ++ memcpy(&tx_buff, buf, 4); ++ writel(tx_buff, pci_bmc_device->msg_bar_reg + ++ ((index == QUEUE1) ? PCI_BMC_HOST2BMC_Q1 : PCI_BMC_HOST2BMC_Q2)); ++ //trigger to host ++ writel(HOST2BMC_INT_STS_DOORBELL | HOST2BMC_ENABLE_INTB, ++ pci_bmc_device->msg_bar_reg + PCI_BMC_HOST2BMC_STS); ++ ++ return sizeof(u32); ++} ++ ++static irqreturn_t aspeed_pci_host_bmc_device_interrupt(int irq, void *dev_id) ++{ ++ struct aspeed_pci_bmc_dev *pci_bmc_device = dev_id; ++ u32 bmc2host_q_sts = readl(pci_bmc_device->msg_bar_reg + PCI_BMC_BMC2HOST_STS); ++ ++ if (bmc2host_q_sts & BMC2HOST_INT_STS_DOORBELL) ++ writel(BMC2HOST_INT_STS_DOORBELL, ++ pci_bmc_device->msg_bar_reg + PCI_BMC_BMC2HOST_STS); ++ ++ if (bmc2host_q_sts & BMC2HOST_ENABLE_INTB) ++ writel(BMC2HOST_ENABLE_INTB, pci_bmc_device->msg_bar_reg + PCI_BMC_BMC2HOST_STS); ++ ++ if (bmc2host_q_sts & BMC2HOST_Q1_FULL) ++ dev_info(pci_bmc_device->dev, "Q1 Full\n"); ++ ++ if (bmc2host_q_sts & BMC2HOST_Q2_FULL) ++ dev_info(pci_bmc_device->dev, "Q2 Full\n"); ++ ++ //check q1 ++ if (!(readl(pci_bmc_device->msg_bar_reg + PCI_BMC_HOST2BMC_STS) & HOST2BMC_Q1_FULL)) ++ wake_up_interruptible(&pci_bmc_device->queue[QUEUE1].tx_wait); ++ ++ if (!(readl(pci_bmc_device->msg_bar_reg + PCI_BMC_BMC2HOST_STS) & BMC2HOST_Q1_EMPTY)) ++ wake_up_interruptible(&pci_bmc_device->queue[QUEUE1].rx_wait); ++ //chech q2 ++ if (!(readl(pci_bmc_device->msg_bar_reg + PCI_BMC_HOST2BMC_STS) & HOST2BMC_Q2_FULL)) ++ wake_up_interruptible(&pci_bmc_device->queue[QUEUE2].tx_wait); ++ ++ if (!(readl(pci_bmc_device->msg_bar_reg + PCI_BMC_BMC2HOST_STS) & BMC2HOST_Q2_EMPTY)) ++ wake_up_interruptible(&pci_bmc_device->queue[QUEUE2].rx_wait); ++ ++ return IRQ_HANDLED; ++} ++ ++static irqreturn_t aspeed_pci_host_mbox_interrupt(int irq, void *dev_id) ++{ ++ struct aspeed_pci_bmc_dev *pci_bmc_device = dev_id; ++ u32 isr = readl(pci_bmc_device->sio_mbox_reg + 0x94); ++ ++ if (isr & BIT(7)) ++ writel(BIT(7), pci_bmc_device->sio_mbox_reg + 0x94); ++ ++ return IRQ_HANDLED; ++} ++ ++static void aspeed_mmbi_work_func(struct work_struct *workq) ++{ ++ struct aspeed_mmbi_channel *chan = container_of(workq, struct aspeed_mmbi_channel, work); ++ u32 weight = 256, req_data_len, unread_data_len; ++ u8 type, padding; ++ int i; ++ ++ for (i = 0; i < weight; i++) { ++ if (get_mmbi_header(chan, &req_data_len, &type, &unread_data_len, &padding) != 0) ++ return; ++ ++ dev_dbg(chan->dev, "%s: Length: 0x%0x, Protocol Type: %d\n", ++ __func__, req_data_len, type); ++ ++ if (type == MMBI_PROTOCOL_MCTP) ++ mctp_mmbi_rx(chan); ++ else ++ /* Discard data and advance the hrws */ ++ 
update_host_rws(chan, 0, req_data_len + sizeof(struct mmbi_header) + padding); ++ ++ raise_h2b_interrupt(chan); ++ } ++ ++ if (get_mmbi_header(chan, &req_data_len, &type, &unread_data_len, &padding) != 0) ++ queue_work(system_unbound_wq, &chan->work); ++} ++ ++static irqreturn_t aspeed_pci_mmbi_isr(int irq, void *dev_id) ++{ ++ struct aspeed_pcie_mmbi *mmbi = dev_id; ++ struct aspeed_mmbi_channel *chan = &mmbi->chan; ++ ssize_t avail_buf_len; ++ ++ get_h2b_avail_buf_len(chan, &avail_buf_len); ++ if (avail_buf_len > MCTP_MMBI_MTU_MAX) { ++ if (netif_queue_stopped(chan->ndev)) { ++ dev_dbg(chan->dev, "Wake up mctp net device\n"); ++ netif_wake_queue(chan->ndev); ++ } ++ } ++ ++ if (mmbi_state_check(chan)) ++ return IRQ_HANDLED; ++ ++ queue_work(system_unbound_wq, &chan->work); ++ ++ return IRQ_HANDLED; ++} ++ ++static void aspeed_pci_setup_irq_resource(struct pci_dev *pdev) ++{ ++ struct aspeed_pci_bmc_dev *pci_bmc_dev = pci_get_drvdata(pdev); ++ ++ /* Assign static msi index table by platform */ ++ if (pdev->revision == 0x27) { ++ if (pci_bmc_dev->driver_data == ASPEED) { ++ pci_bmc_dev->msi_idx_table = ast2700_soc0_msi_idx_table; ++ } else { ++ pci_bmc_dev->msi_idx_table = ast2700_soc1_msi_idx_table; ++ pci_bmc_dev->ast2700_soc1 = true; ++ } ++ } else { ++ pci_bmc_dev->msi_idx_table = ast2600_msi_idx_table; ++ } ++ ++ if (pci_alloc_irq_vectors(pdev, 1, BMC_MULTI_MSI, PCI_IRQ_INTX | PCI_IRQ_MSI) <= 1) ++ /* Set all msi index to the first vector */ ++ memset(pci_bmc_dev->msi_idx_table, 0, sizeof(int) * MAX_MSI_NUM); ++} ++ ++static int aspeed_pci_bmc_device_setup_queue(struct pci_dev *pdev) ++{ ++ struct aspeed_pci_bmc_dev *pci_bmc_device = pci_get_drvdata(pdev); ++ struct device *dev = &pdev->dev; ++ int ret, i; ++ ++ for (i = 0; i < ASPEED_QUEUE_NUM; i++) { ++ struct aspeed_queue_message *queue = &pci_bmc_device->queue[i]; ++ ++ init_waitqueue_head(&queue->tx_wait); ++ init_waitqueue_head(&queue->rx_wait); ++ ++ sysfs_bin_attr_init(&queue->bin); ++ ++ /* Queue name index starts from 1 */ ++ queue->bin.attr.name = ++ devm_kasprintf(dev, GFP_KERNEL, "pci-bmc-dev-queue%d", (i + 1)); ++ queue->bin.attr.mode = 0600; ++ queue->bin.read = aspeed_queue_rx; ++ queue->bin.write = aspeed_queue_tx; ++ queue->bin.size = 4; ++ queue->bin.private = queue; ++ ++ ret = sysfs_create_bin_file(&pdev->dev.kobj, &queue->bin); ++ if (ret) { ++ dev_err(dev, "error for bin%d file\n", i); ++ return ret; ++ } ++ ++ queue->kn = kernfs_find_and_get(dev->kobj.sd, queue->bin.attr.name); ++ if (!queue->kn) { ++ sysfs_remove_bin_file(&dev->kobj, &queue->bin); ++ return ret; ++ } ++ ++ queue->index = i; ++ queue->pci_bmc_device = pci_bmc_device; ++ } ++ ++ return 0; ++} ++ ++static int aspeed_pci_bmc_device_setup_vuart(struct pci_dev *pdev) ++{ ++ struct aspeed_pci_bmc_dev *pci_bmc_dev = pci_get_drvdata(pdev); ++ struct device *dev = &pdev->dev; ++ u16 vuart_ioport; ++ int ret, i; ++ ++ for (i = 0; i < VUART_MAX_PARMS; i++) { ++ /* Assign the line to non-exist device */ ++ pci_bmc_dev->uart_line[i] = -ENOENT; ++ vuart_ioport = 0x3F8 - (i * 0x100); ++ pci_bmc_dev->uart[i].port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ; ++ pci_bmc_dev->uart[i].port.uartclk = 115200 * 16; ++ pci_bmc_dev->uart[i].port.irq = ++ pci_irq_vector(pdev, pci_bmc_dev->msi_idx_table[VUART0_MSI + i]); ++ pci_bmc_dev->uart[i].port.dev = dev; ++ pci_bmc_dev->uart[i].port.iotype = UPIO_MEM32; ++ pci_bmc_dev->uart[i].port.iobase = 0; ++ pci_bmc_dev->uart[i].port.mapbase = ++ pci_bmc_dev->message_bar_base + (vuart_ioport << 2); ++ 
pci_bmc_dev->uart[i].port.membase = 0; ++ pci_bmc_dev->uart[i].port.type = PORT_16550A; ++ pci_bmc_dev->uart[i].port.flags |= (UPF_IOREMAP | UPF_FIXED_PORT | UPF_FIXED_TYPE); ++ pci_bmc_dev->uart[i].port.regshift = 2; ++ ret = serial8250_register_8250_port(&pci_bmc_dev->uart[i]); ++ if (ret < 0) { ++ dev_err_probe(dev, ret, "Can't setup PCIe VUART\n"); ++ return ret; ++ } ++ pci_bmc_dev->uart_line[i] = ret; ++ } ++ return 0; ++} ++ ++static int aspeed_pci_bmc_device_setup_memory_mapping(struct pci_dev *pdev) ++{ ++ struct aspeed_pci_bmc_dev *pci_bmc_dev = pci_get_drvdata(pdev); ++ struct device *dev = &pdev->dev; ++ int ret; ++ ++ pci_bmc_dev->miscdev.minor = MISC_DYNAMIC_MINOR; ++ pci_bmc_dev->miscdev.name = ++ devm_kasprintf(dev, GFP_KERNEL, "%s%d", DRIVER_NAME, pci_bmc_dev->id); ++ pci_bmc_dev->miscdev.fops = &aspeed_pci_bmc_dev_fops; ++ pci_bmc_dev->miscdev.parent = dev; ++ ++ ret = misc_register(&pci_bmc_dev->miscdev); ++ if (ret) { ++ pr_err("host bmc register fail %d\n", ret); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static int aspeed_pci_bmc_device_setup_mbox(struct pci_dev *pdev) ++{ ++ struct aspeed_pci_bmc_dev *pci_bmc_dev = pci_get_drvdata(pdev); ++ struct device *dev = &pdev->dev; ++ int ret; ++ ++ /* setup mbox */ ++ pci_bmc_dev->pcie_sio_decode_addr = pci_bmc_dev->msg_bar_reg + PCIE_DEVICE_SIO_ADDR; ++ writel(0xaa, pci_bmc_dev->pcie_sio_decode_addr); ++ writel(0xa5, pci_bmc_dev->pcie_sio_decode_addr); ++ writel(0xa5, pci_bmc_dev->pcie_sio_decode_addr); ++ writel(0x07, pci_bmc_dev->pcie_sio_decode_addr); ++ writel(0x0e, pci_bmc_dev->pcie_sio_decode_addr + 0x04); ++ /* disable */ ++ writel(0x30, pci_bmc_dev->pcie_sio_decode_addr); ++ writel(0x00, pci_bmc_dev->pcie_sio_decode_addr + 0x04); ++ /* set decode address 0x100 */ ++ writel(0x60, pci_bmc_dev->pcie_sio_decode_addr); ++ writel(0x01, pci_bmc_dev->pcie_sio_decode_addr + 0x04); ++ writel(0x61, pci_bmc_dev->pcie_sio_decode_addr); ++ writel(0x00, pci_bmc_dev->pcie_sio_decode_addr + 0x04); ++ /* enable */ ++ writel(0x30, pci_bmc_dev->pcie_sio_decode_addr); ++ writel(0x01, pci_bmc_dev->pcie_sio_decode_addr + 0x04); ++ pci_bmc_dev->sio_mbox_reg = pci_bmc_dev->msg_bar_reg + 0x400; ++ ++ ret = devm_request_irq(dev, ++ pci_irq_vector(pdev, pci_bmc_dev->msi_idx_table[MBX_MSI]), ++ aspeed_pci_host_mbox_interrupt, IRQF_SHARED, ++ devm_kasprintf(dev, GFP_KERNEL, "aspeed-sio-mbox%d", pci_bmc_dev->id), ++ pci_bmc_dev); ++ if (ret) { ++ pr_err("host bmc device Unable to get IRQ %d\n", ret); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static int mmbi_signature_check(struct aspeed_mmbi_channel *chan) ++{ ++ u8 signature[6]; ++ u64 __timeout_us = 1000; ++ ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); ++ ++ for (;;) { ++ memcpy_fromio(signature, chan->desc_vmem, 6); ++ if (!memcmp(MMBI_SIGNATURE, signature, 6)) ++ break; ++ if (__timeout_us && ktime_compare(ktime_get(), __timeout) > 0) ++ return -ETIMEDOUT; ++ } ++ return 0; ++} ++ ++static int mmbi_desc_init(struct aspeed_mmbi_channel *chan) ++{ ++ struct aspeed_pcie_mmbi *mmbi = chan->mmbi; ++ struct device *dev = chan->dev; ++ struct mmbi_cap_desc desc; ++ u8 __iomem *desc_base = chan->desc_vmem; ++ int ret; ++ ++ /* First, check mmbi signature "#MMBI$" */ ++ ret = mmbi_signature_check(chan); ++ if (ret) { ++ dev_warn(dev, "Check MMBI signature timeout\n"); ++ return ret; ++ } ++ ++ memcpy_fromio(&desc, chan->desc_vmem, sizeof(desc)); ++ ++ /* HROS */ ++ if (((desc.bt_desc.h_ros_p << 3) + sizeof(struct host_ros)) >= mmbi->mem_size) { ++ dev_warn(dev, "HROS is out of 
range"); ++ return -EINVAL; ++ } ++ chan->hros_vmem = desc_base + (desc.bt_desc.h_ros_p << 3); ++ ++ /* HRWS */ ++ if (((desc.bt_desc.h_rws_p << 3) + sizeof(struct host_rws)) >= mmbi->mem_size) { ++ dev_warn(dev, "HRWS is out of range"); ++ return -EINVAL; ++ } ++ chan->hrws_vmem = desc_base + (desc.bt_desc.h_rws_p << 3); ++ ++ ret = mmbi_bmc_up_check(chan); ++ if (ret) { ++ dev_warn(dev, "Check BMC up timeout\n"); ++ return ret; ++ } ++ ++ /* Implementations of MMBI described in this document shall indicate version 1 of MMBI */ ++ if (desc.version != 1) { ++ dev_warn(dev, "MMBI version must be 1"); ++ goto err_mismatch; ++ } ++ ++ /* This MMBI interface is intended for OS use */ ++ if (desc.os_use != 1) { ++ dev_warn(dev, "This MMBI does not provide for OS"); ++ goto err_mismatch; ++ } ++ ++ /* Current application is only MMBI Variable Packet Size Circular Buffers (VPSCB) v1 */ ++ if (desc.buffer_type != 1) { ++ dev_warn(dev, "The buffer type is not VPSCB: (%d)", desc.buffer_type); ++ goto err_mismatch; ++ } ++ ++ /* B2H Buffer */ ++ if (((desc.b2h_ba << 3) + desc.b2h_l) > mmbi->mem_size) { ++ dev_warn(dev, "B2H buffer is out of range"); ++ goto err_mismatch; ++ } ++ chan->b2h_cb_vmem = desc_base + (desc.b2h_ba << 3); ++ chan->b2h_cb_size = desc.b2h_l; ++ ++ /* H2B Buffer */ ++ if (((desc.h2b_ba << 3) + desc.h2b_l) > mmbi->mem_size) { ++ dev_warn(dev, "H2B buffer is out of range"); ++ goto err_mismatch; ++ } ++ chan->h2b_cb_vmem = desc_base + (desc.h2b_ba << 3); ++ chan->h2b_cb_size = desc.h2b_l; ++ ++ dev_dbg(dev, "B2H mapped addr - desc: %p, hros: %p, b2h_cb: %p\n", ++ chan->desc_vmem, chan->hros_vmem, chan->b2h_cb_vmem); ++ dev_dbg(dev, "H2B mapped addr - hrws: %p, h2b_cb: %p\n", chan->hrws_vmem, ++ chan->h2b_cb_vmem); ++ ++ dev_dbg(dev, "B2H buffer size: 0x%0x\n", chan->b2h_cb_size); ++ dev_dbg(dev, "H2B buffer size: 0x%0x\n", chan->h2b_cb_size); ++ ++ /* Host Interrupt */ ++ chan->host_int_en = !!desc.bt_desc.h_int_t; ++ if (chan->host_int_en) { ++ /* 1 for PCIe */ ++ if (desc.bt_desc.h_int_t != 1) ++ chan->host_int_en = 0; ++ else ++ chan->host_int_location = desc.bt_desc.h_int_l; ++ } ++ ++ /* BMC Interrupt */ ++ chan->bmc_int_en = !!desc.bt_desc.bmc_int_t; ++ if (chan->bmc_int_en) { ++ if (desc.bt_desc.bmc_int_t != 1) { ++ chan->bmc_int_en = 0; ++ } else { ++ chan->bmc_int_location = desc.bt_desc.bmc_int_l; ++ chan->bmc_int_vmem = desc_base + chan->bmc_int_location; ++ chan->bmc_int_value = desc.bt_desc.bmc_int_v; ++ } ++ } ++ ++ INIT_WORK(&chan->work, aspeed_mmbi_work_func); ++ ++ return 0; ++err_mismatch: ++ /* Change state to INIT_MISMATCH */ ++ mmbi_set_host_rst(chan, 1); ++ return -EINVAL; ++} ++ ++static int aspeed_pci_mmbi_init(struct aspeed_pcie_mmbi *mmbi) ++{ ++ struct aspeed_mmbi_channel *chan = &mmbi->chan; ++ struct device *dev = chan->dev; ++ int ret; ++ ++ chan->desc_vmem = mmbi->mem; ++ chan->mmbi = mmbi; ++ ++ ret = mmbi_desc_init(chan); ++ if (ret) { ++ dev_err(dev, "Unable to init mmbi desc\n"); ++ return ret; ++ } ++ ++ /* Initialize MTCP function */ ++ ret = aspeed_mmbi_mctp_init(chan); ++ if (ret) { ++ dev_err(dev, "Unable to init mctp\n"); ++ return ret; ++ } ++ ++ /* Change state to NORMAL_RUNTIME */ ++ mmbi_set_host_up(chan, 1); ++ mmbi_set_host_rdy(chan, 1); ++ /* Trigger BMC to finish normal runtime state */ ++ raise_h2b_interrupt(chan); ++ ++ return 0; ++} ++ ++/* AST2700 PCIe MMBI ++ * SoC : | 0 | 1 | ++ * BAR : | 2 3 4 5 | 0 1 2 3 4 5 | ++ * MMBI: | 0 1 2 3 | 0 1 2 3 4 5 | ++ */ ++static void aspeed_pci_bmc_device_setup_mmbi(struct pci_dev *pdev) ++{ 
++ struct aspeed_pci_bmc_dev *pci_bmc_dev = pci_get_drvdata(pdev); ++ struct aspeed_pcie_mmbi *mmbi; ++ u32 start_bar = 2, mmbi_max_inst = 4, start_msi = MMBI0_MSI; /* AST2700 SoC0 */ ++ int i, rc = 0; ++ ++ /* AST2700 A1 supports MMBI */ ++ if (pdev->revision != 0x27) ++ return; ++ ++ if (pci_bmc_dev->ast2700_soc1) { ++ /* AST2700 SoC1 */ ++ start_bar = 0; ++ mmbi_max_inst = 6; ++ start_msi = 0; ++ } ++ ++ for (i = 0; i < mmbi_max_inst; i++) { ++ mmbi = &pci_bmc_dev->mmbi[i]; ++ ++ /* Get MMBI BAR resource */ ++ mmbi->base = pci_resource_start(pdev, start_bar + i); ++ mmbi->mem_size = pci_resource_len(pdev, start_bar + i); ++ ++ /* Check if there is bar */ ++ if (!mmbi->mem_size) ++ continue; ++ ++ mmbi->mem = pci_ioremap_bar(pdev, start_bar + i); ++ if (!mmbi->mem) { ++ mmbi->mem_size = 0; ++ continue; ++ } ++ ++ mmbi->chan.dev = &pdev->dev; ++ mmbi->dev_name = devm_kasprintf(mmbi->chan.dev, GFP_KERNEL, "pci-mmbi%d", i); ++ mmbi->id = i; ++ rc = aspeed_pci_mmbi_init(mmbi); ++ if (rc < 0) { ++ pr_err("Initialize MMBI device failed.\n"); ++ goto free_ioremap; ++ } ++ ++ if (mmbi->chan.host_int_en) { ++ mmbi->irq = pci_irq_vector(pdev, pci_bmc_dev->msi_idx_table[start_msi + i]); ++ rc = devm_request_irq(&pdev->dev, mmbi->irq, aspeed_pci_mmbi_isr, ++ IRQF_SHARED, mmbi->dev_name, mmbi); ++ if (rc) { ++ pr_err("MMBI device %s unable to get IRQ %d\n", mmbi->dev_name, rc); ++ mmbi->irq = 0; ++ goto free_ioremap; ++ } ++ } else { ++ mmbi->irq = 0; ++ } ++ continue; ++free_ioremap: ++ mmbi->mem_size = 0; ++ pci_iounmap(pdev, mmbi->mem); ++ } ++} ++ ++static void aspeed_pci_host_bmc_device_release_queue(struct pci_dev *pdev) ++{ ++ struct aspeed_pci_bmc_dev *pci_bmc_dev = pci_get_drvdata(pdev); ++ int i; ++ ++ for (i = 0; i < ASPEED_QUEUE_NUM; i++) ++ sysfs_remove_bin_file(&pdev->dev.kobj, &pci_bmc_dev->queue[i].bin); ++} ++ ++static void aspeed_pci_host_bmc_device_release_vuart(struct pci_dev *pdev) ++{ ++ struct aspeed_pci_bmc_dev *pci_bmc_dev = pci_get_drvdata(pdev); ++ int i; ++ ++ for (i = 0; i < VUART_MAX_PARMS; i++) { ++ if (pci_bmc_dev->uart_line[i] >= 0) ++ serial8250_unregister_port(pci_bmc_dev->uart_line[i]); ++ } ++} ++ ++static void aspeed_pci_host_bmc_device_release_memory_mapping(struct pci_dev *pdev) ++{ ++ struct aspeed_pci_bmc_dev *pci_bmc_dev = pci_get_drvdata(pdev); ++ ++ if (!list_empty(&pci_bmc_dev->miscdev.list)) ++ misc_deregister(&pci_bmc_dev->miscdev); ++} ++ ++static void aspeed_pci_release_mmbi(struct pci_dev *pdev) ++{ ++ struct aspeed_pci_bmc_dev *pci_bmc_dev = pci_get_drvdata(pdev); ++ struct aspeed_pcie_mmbi *mmbi; ++ int i; ++ ++ if (pdev->revision != 0x27) ++ return; ++ ++ for (i = 0; i < MMBI_MAX_INST; i++) { ++ mmbi = &pci_bmc_dev->mmbi[i]; ++ ++ if (mmbi->mem_size == 0) ++ continue; ++ ++ cancel_work_sync(&mmbi->chan.work); ++ ++ mmbi_set_host_rdy(&mmbi->chan, 0); ++ mmbi_set_host_up(&mmbi->chan, 0); ++ ++ unregister_netdev(mmbi->chan.ndev); ++ ++ if (mmbi->mem) ++ pci_iounmap(pdev, mmbi->mem); ++ if (mmbi->irq != 0) ++ devm_free_irq(&pdev->dev, mmbi->irq, mmbi); ++ } ++} ++ ++static int aspeed_pci_host_setup(struct pci_dev *pdev) ++{ ++ struct aspeed_pci_bmc_dev *pci_bmc_dev = pci_get_drvdata(pdev); ++ int rc = 0; ++ ++ /* Get share memory BAR */ ++ pci_bmc_dev->mem_bar_base = pci_resource_start(pdev, 0); ++ pci_bmc_dev->mem_bar_size = pci_resource_len(pdev, 0); ++ pci_bmc_dev->mem_bar_reg = pci_ioremap_bar(pdev, 0); ++ if (!pci_bmc_dev->mem_bar_reg) ++ return -ENOMEM; ++ ++ /* Get Message BAR */ ++ pci_bmc_dev->message_bar_base = pci_resource_start(pdev, 1); 
++ pci_bmc_dev->message_bar_size = pci_resource_len(pdev, 1); ++ pci_bmc_dev->msg_bar_reg = pci_ioremap_bar(pdev, 1); ++ if (!pci_bmc_dev->msg_bar_reg) { ++ rc = -ENOMEM; ++ goto out_free0; ++ } ++ ++ /* AST2600 ERRTA40: dummy read */ ++ if (pdev->revision < 0x27) ++ (void)__raw_readl((void __iomem *)pci_bmc_dev->msg_bar_reg); ++ ++ rc = aspeed_pci_bmc_device_setup_queue(pdev); ++ if (rc) { ++ pr_err("Cannot setup Queue Message"); ++ goto out_free1; ++ } ++ ++ rc = aspeed_pci_bmc_device_setup_memory_mapping(pdev); ++ if (rc) { ++ pr_err("Cannot setup Memory Mapping"); ++ goto out_free_queue; ++ } ++ ++ rc = aspeed_pci_bmc_device_setup_mbox(pdev); ++ if (rc) { ++ pr_err("Cannot setup Mailnbox"); ++ goto out_free_mmapping; ++ } ++ ++ rc = aspeed_pci_bmc_device_setup_vuart(pdev); ++ if (rc) { ++ pr_err("Cannot setup Virtual UART"); ++ goto out_free_mbox; ++ } ++ ++ rc = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, pci_bmc_dev->msi_idx_table[BMC_MSI]), ++ aspeed_pci_host_bmc_device_interrupt, IRQF_SHARED, ++ pci_bmc_dev->miscdev.name, pci_bmc_dev); ++ if (rc) { ++ pr_err("Get BMC DEVICE IRQ failed. (err=%d)\n", rc); ++ goto out_free_uart; ++ } ++ ++ /* Setup AST2700 PCIe MMBI device */ ++ aspeed_pci_bmc_device_setup_mmbi(pdev); ++ ++ return 0; ++ ++out_free_uart: ++ aspeed_pci_host_bmc_device_release_vuart(pdev); ++out_free_mbox: ++ devm_free_irq(&pdev->dev, pci_irq_vector(pdev, pci_bmc_dev->msi_idx_table[MBX_MSI]), ++ pci_bmc_dev); ++out_free_mmapping: ++ aspeed_pci_host_bmc_device_release_memory_mapping(pdev); ++out_free_queue: ++ aspeed_pci_host_bmc_device_release_queue(pdev); ++out_free1: ++ pci_iounmap(pdev, pci_bmc_dev->msg_bar_reg); ++out_free0: ++ pci_iounmap(pdev, pci_bmc_dev->mem_bar_reg); ++ ++ pci_release_regions(pdev); ++ return rc; ++} ++ ++static int aspeed_pci_host_mmbi_device_setup(struct pci_dev *pdev) ++{ ++ aspeed_pci_bmc_device_setup_mmbi(pdev); ++ return 0; ++} ++ ++static struct aspeed_platform aspeed_pcie_host[] = { ++ { .setup = aspeed_pci_host_setup }, ++ { .setup = aspeed_pci_host_mmbi_device_setup }, ++ { 0 } ++}; ++ ++static int aspeed_pci_host_bmc_device_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ++{ ++ struct aspeed_pci_bmc_dev *pci_bmc_dev; ++ int rc = 0; ++ ++ pr_info("ASPEED BMC PCI ID %04x:%04x, IRQ=%u\n", pdev->vendor, pdev->device, pdev->irq); ++ ++ pci_bmc_dev = devm_kzalloc(&pdev->dev, sizeof(*pci_bmc_dev), GFP_KERNEL); ++ if (!pci_bmc_dev) ++ return -ENOMEM; ++ ++ /* Get platform id */ ++ pci_bmc_dev->driver_data = ent->driver_data; ++ pci_bmc_dev->platform = &aspeed_pcie_host[ent->driver_data]; ++ ++ pci_bmc_dev->id = ida_simple_get(&bmc_device_ida, 0, 0, GFP_KERNEL); ++ if (pci_bmc_dev->id < 0) ++ return pci_bmc_dev->id; ++ ++ rc = pci_enable_device(pdev); ++ if (rc) { ++ dev_err(&pdev->dev, "pci_enable_device() returned error %d\n", rc); ++ return rc; ++ } ++ ++ pci_set_master(pdev); ++ pci_set_drvdata(pdev, pci_bmc_dev); ++ ++ /* Prepare IRQ resource */ ++ aspeed_pci_setup_irq_resource(pdev); ++ ++ /* Setup BMC PCI device */ ++ rc = pci_bmc_dev->platform->setup(pdev); ++ if (rc) { ++ dev_err(&pdev->dev, "ASPEED PCIe Host device returned error %d\n", rc); ++ pci_free_irq_vectors(pdev); ++ pci_disable_device(pdev); ++ return rc; ++ } ++ ++ return 0; ++} ++ ++static void aspeed_pci_host_bmc_device_remove(struct pci_dev *pdev) ++{ ++ struct aspeed_pci_bmc_dev *pci_bmc_dev = pci_get_drvdata(pdev); ++ ++ if (pci_bmc_dev->driver_data == ASPEED) { ++ aspeed_pci_host_bmc_device_release_queue(pdev); ++ 
aspeed_pci_host_bmc_device_release_memory_mapping(pdev); ++ aspeed_pci_host_bmc_device_release_vuart(pdev); ++ ++ devm_free_irq(&pdev->dev, pci_irq_vector(pdev, pci_bmc_dev->msi_idx_table[BMC_MSI]), ++ pci_bmc_dev); ++ devm_free_irq(&pdev->dev, pci_irq_vector(pdev, pci_bmc_dev->msi_idx_table[MBX_MSI]), ++ pci_bmc_dev); ++ } ++ ++ aspeed_pci_release_mmbi(pdev); ++ ++ ida_simple_remove(&bmc_device_ida, pci_bmc_dev->id); ++ ++ pci_iounmap(pdev, pci_bmc_dev->msg_bar_reg); ++ pci_iounmap(pdev, pci_bmc_dev->mem_bar_reg); ++ ++ pci_free_irq_vectors(pdev); ++ pci_release_regions(pdev); ++ pci_disable_device(pdev); ++} ++ ++/** ++ * This table holds the list of (VendorID,DeviceID) supported by this driver ++ * ++ */ ++static struct pci_device_id aspeed_host_bmc_dev_pci_ids[] = { ++ /* ASPEED BMC Device */ ++ { PCI_DEVICE(0x1A03, 0x2402), .class = 0xFF0000, .class_mask = 0xFFFF00, ++ .driver_data = ASPEED }, ++ /* AST2700 SoC1 MMBI device */ ++ { PCI_DEVICE(0x1A03, 0x2402), .class = 0x0C0C00, .class_mask = (0xFFFF00), ++ .driver_data = ASPEED_AST2700_SOC1 }, ++ { ++ 0, ++ } ++}; ++ ++MODULE_DEVICE_TABLE(pci, aspeed_host_bmc_dev_pci_ids); ++ ++static struct pci_driver aspeed_host_bmc_dev_driver = { ++ .name = DRIVER_NAME, ++ .id_table = aspeed_host_bmc_dev_pci_ids, ++ .probe = aspeed_pci_host_bmc_device_probe, ++ .remove = aspeed_pci_host_bmc_device_remove, ++}; ++ ++static int __init aspeed_host_bmc_device_init(void) ++{ ++ return pci_register_driver(&aspeed_host_bmc_dev_driver); ++} ++ ++static void aspeed_host_bmc_device_exit(void) ++{ ++ /* unregister pci driver */ ++ pci_unregister_driver(&aspeed_host_bmc_dev_driver); ++} ++ ++late_initcall(aspeed_host_bmc_device_init); ++module_exit(aspeed_host_bmc_device_exit); ++ ++MODULE_AUTHOR("Ryan Chen "); ++MODULE_DESCRIPTION("ASPEED Host BMC DEVICE Driver"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/soc/aspeed/aspeed-lpc-ctrl.c b/drivers/soc/aspeed/aspeed-lpc-ctrl.c +--- a/drivers/soc/aspeed/aspeed-lpc-ctrl.c 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/soc/aspeed/aspeed-lpc-ctrl.c 2025-12-23 10:16:21.124032669 +0000 +@@ -353,7 +353,7 @@ + .of_match_table = aspeed_lpc_ctrl_match, + }, + .probe = aspeed_lpc_ctrl_probe, +- .remove_new = aspeed_lpc_ctrl_remove, ++ .remove = aspeed_lpc_ctrl_remove, + }; + + module_platform_driver(aspeed_lpc_ctrl_driver); +diff --git a/drivers/soc/aspeed/aspeed-lpc-mbox.c b/drivers/soc/aspeed/aspeed-lpc-mbox.c +--- a/drivers/soc/aspeed/aspeed-lpc-mbox.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/soc/aspeed/aspeed-lpc-mbox.c 2025-12-23 10:16:21.124032669 +0000 +@@ -0,0 +1,406 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++/* ++ * Copyright 2017 IBM Corporation ++ * Copyright 2021 Aspeed Technology Inc. 
++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define DEVICE_NAME "aspeed-mbox" ++ ++#define ASPEED_MBOX_DR(dr, n) (dr + (n * 4)) ++#define ASPEED_MBOX_STR(str, n) (str + (n / 8) * 4) ++#define ASPEED_MBOX_BIE(bie, n) (bie + (n / 8) * 4) ++#define ASPEED_MBOX_HIE(hie, n) (hie + (n / 8) * 4) ++ ++#define ASPEED_MBOX_BCR_RECV BIT(7) ++#define ASPEED_MBOX_BCR_MASK BIT(1) ++#define ASPEED_MBOX_BCR_SEND BIT(0) ++ ++/* ioctl code */ ++#define ASPEED_MBOX_IOCTL 0xA3 ++#define ASPEED_MBOX_IOCTL_GET_SIZE \ ++ _IOR(ASPEED_MBOX_IOCTL, 0, struct aspeed_mbox_ioctl_data) ++ ++struct aspeed_mbox_ioctl_data { ++ unsigned int data; ++}; ++ ++struct aspeed_mbox_model { ++ unsigned int dr_num; ++ ++ /* offsets to the MBOX registers */ ++ unsigned int dr; ++ unsigned int str; ++ unsigned int bcr; ++ unsigned int hcr; ++ unsigned int bie; ++ unsigned int hie; ++}; ++ ++struct aspeed_mbox { ++ struct miscdevice miscdev; ++ struct regmap *map; ++ unsigned int base; ++ wait_queue_head_t queue; ++ struct mutex mutex; ++ const struct aspeed_mbox_model *model; ++}; ++ ++static atomic_t aspeed_mbox_open_count = ATOMIC_INIT(0); ++ ++static u8 aspeed_mbox_inb(struct aspeed_mbox *mbox, int reg) ++{ ++ /* ++ * The mbox registers are actually only one byte but are addressed ++ * four bytes apart. The other three bytes are marked 'reserved', ++ * they *should* be zero but lets not rely on it. ++ * I am going to rely on the fact we can casually read/write to them... ++ */ ++ unsigned int val = 0xff; /* If regmap throws an error return 0xff */ ++ int rc = regmap_read(mbox->map, mbox->base + reg, &val); ++ ++ if (rc) ++ dev_err(mbox->miscdev.parent, "regmap_read() failed with " ++ "%d (reg: 0x%08x)\n", rc, reg); ++ ++ return val & 0xff; ++} ++ ++static void aspeed_mbox_outb(struct aspeed_mbox *mbox, u8 data, int reg) ++{ ++ int rc = regmap_write(mbox->map, mbox->base + reg, data); ++ ++ if (rc) ++ dev_err(mbox->miscdev.parent, "regmap_write() failed with " ++ "%d (data: %u reg: 0x%08x)\n", rc, data, reg); ++} ++ ++static struct aspeed_mbox *file_mbox(struct file *file) ++{ ++ return container_of(file->private_data, struct aspeed_mbox, miscdev); ++} ++ ++static int aspeed_mbox_open(struct inode *inode, struct file *file) ++{ ++ struct aspeed_mbox *mbox = file_mbox(file); ++ const struct aspeed_mbox_model *model = mbox->model; ++ ++ if (atomic_inc_return(&aspeed_mbox_open_count) == 1) { ++ /* ++ * Clear the interrupt status bit if it was left on and unmask ++ * interrupts. 
++ * ASPEED_MBOX_BCR_RECV bit is W1C, this also unmasks in 1 step ++ */ ++ aspeed_mbox_outb(mbox, ASPEED_MBOX_BCR_RECV, model->bcr); ++ return 0; ++ } ++ ++ atomic_dec(&aspeed_mbox_open_count); ++ return -EBUSY; ++} ++ ++static ssize_t aspeed_mbox_read(struct file *file, char __user *buf, ++ size_t count, loff_t *ppos) ++{ ++ struct aspeed_mbox *mbox = file_mbox(file); ++ const struct aspeed_mbox_model *model = mbox->model; ++ char __user *p = buf; ++ ssize_t ret; ++ int i; ++ ++ if (!access_ok(buf, count)) ++ return -EFAULT; ++ ++ if (count + *ppos > model->dr_num) ++ return -EINVAL; ++ ++ if (file->f_flags & O_NONBLOCK) { ++ if (!(aspeed_mbox_inb(mbox, model->bcr) & ++ ASPEED_MBOX_BCR_RECV)) ++ return -EAGAIN; ++ } else if (wait_event_interruptible(mbox->queue, ++ aspeed_mbox_inb(mbox, model->bcr) & ++ ASPEED_MBOX_BCR_RECV)) { ++ return -ERESTARTSYS; ++ } ++ ++ mutex_lock(&mbox->mutex); ++ ++ for (i = *ppos; count > 0 && i < model->dr_num; i++) { ++ uint8_t reg = aspeed_mbox_inb(mbox, ASPEED_MBOX_DR(model->dr, i)); ++ ++ ret = __put_user(reg, p); ++ if (ret) ++ goto out_unlock; ++ ++ p++; ++ count--; ++ } ++ ++ /* ASPEED_MBOX_BCR_RECV bit is write to clear, this also unmasks in 1 step */ ++ aspeed_mbox_outb(mbox, ASPEED_MBOX_BCR_RECV, model->bcr); ++ ret = p - buf; ++ ++out_unlock: ++ mutex_unlock(&mbox->mutex); ++ return ret; ++} ++ ++static ssize_t aspeed_mbox_write(struct file *file, const char __user *buf, ++ size_t count, loff_t *ppos) ++{ ++ struct aspeed_mbox *mbox = file_mbox(file); ++ const struct aspeed_mbox_model *model = mbox->model; ++ const char __user *p = buf; ++ ssize_t ret; ++ char c; ++ int i; ++ ++ if (!access_ok(buf, count)) ++ return -EFAULT; ++ ++ if (count + *ppos > model->dr_num) ++ return -EINVAL; ++ ++ mutex_lock(&mbox->mutex); ++ ++ for (i = *ppos; count > 0 && i < model->dr_num; i++) { ++ ret = __get_user(c, p); ++ if (ret) ++ goto out_unlock; ++ ++ aspeed_mbox_outb(mbox, c, ASPEED_MBOX_DR(model->dr, i)); ++ p++; ++ count--; ++ } ++ ++ aspeed_mbox_outb(mbox, ASPEED_MBOX_BCR_SEND, model->bcr); ++ ret = p - buf; ++ ++out_unlock: ++ mutex_unlock(&mbox->mutex); ++ return ret; ++} ++ ++static __poll_t aspeed_mbox_poll(struct file *file, poll_table *wait) ++{ ++ struct aspeed_mbox *mbox = file_mbox(file); ++ const struct aspeed_mbox_model *model = mbox->model; ++ __poll_t mask = 0; ++ ++ poll_wait(file, &mbox->queue, wait); ++ ++ if (aspeed_mbox_inb(mbox, model->bcr) & ASPEED_MBOX_BCR_RECV) ++ mask |= POLLIN; ++ ++ return mask; ++} ++ ++static int aspeed_mbox_release(struct inode *inode, struct file *file) ++{ ++ atomic_dec(&aspeed_mbox_open_count); ++ return 0; ++} ++ ++static long aspeed_mbox_ioctl(struct file *file, unsigned int cmd, ++ unsigned long param) ++{ ++ long ret = 0; ++ struct aspeed_mbox *mbox = file_mbox(file); ++ const struct aspeed_mbox_model *model = mbox->model; ++ struct aspeed_mbox_ioctl_data data; ++ ++ switch (cmd) { ++ case ASPEED_MBOX_IOCTL_GET_SIZE: ++ data.data = model->dr_num; ++ if (copy_to_user((void __user *)param, &data, sizeof(data))) ++ ret = -EFAULT; ++ break; ++ default: ++ ret = -ENOTTY; ++ break; ++ } ++ ++ return ret; ++} ++ ++static const struct file_operations aspeed_mbox_fops = { ++ .owner = THIS_MODULE, ++ .llseek = no_seek_end_llseek, ++ .read = aspeed_mbox_read, ++ .write = aspeed_mbox_write, ++ .open = aspeed_mbox_open, ++ .release = aspeed_mbox_release, ++ .poll = aspeed_mbox_poll, ++ .unlocked_ioctl = aspeed_mbox_ioctl, ++}; ++ ++static irqreturn_t aspeed_mbox_irq(int irq, void *arg) ++{ ++ struct aspeed_mbox *mbox = 
arg; ++ const struct aspeed_mbox_model *model = mbox->model; ++ ++ if (!(aspeed_mbox_inb(mbox, model->bcr) & ASPEED_MBOX_BCR_RECV)) ++ return IRQ_NONE; ++ ++ /* ++ * Leave the status bit set so that we know the data is for us, ++ * clear it once it has been read. ++ */ ++ ++ /* Mask it off, we'll clear it when we the data gets read */ ++ aspeed_mbox_outb(mbox, ASPEED_MBOX_BCR_MASK, model->bcr); ++ ++ wake_up(&mbox->queue); ++ return IRQ_HANDLED; ++} ++ ++static int aspeed_mbox_config_irq(struct aspeed_mbox *mbox, ++ struct platform_device *pdev) ++{ ++ const struct aspeed_mbox_model *model = mbox->model; ++ struct device *dev = &pdev->dev; ++ int i, rc, irq; ++ ++ irq = irq_of_parse_and_map(dev->of_node, 0); ++ if (!irq) ++ return -ENODEV; ++ ++ rc = devm_request_irq(dev, irq, aspeed_mbox_irq, ++ IRQF_SHARED, DEVICE_NAME, mbox); ++ if (rc < 0) { ++ dev_err(dev, "Unable to request IRQ %d\n", irq); ++ return rc; ++ } ++ ++ /* ++ * Disable all register based interrupts. ++ */ ++ for (i = 0; i < model->dr_num / 8; ++i) ++ aspeed_mbox_outb(mbox, 0x00, ASPEED_MBOX_BIE(model->bie, i)); ++ ++ /* These registers are write one to clear. Clear them. */ ++ for (i = 0; i < model->dr_num / 8; ++i) ++ aspeed_mbox_outb(mbox, 0xff, ASPEED_MBOX_STR(model->str, i)); ++ ++ aspeed_mbox_outb(mbox, ASPEED_MBOX_BCR_RECV, model->bcr); ++ return 0; ++} ++ ++static int aspeed_mbox_probe(struct platform_device *pdev) ++{ ++ struct aspeed_mbox *mbox; ++ struct device *dev; ++ int rc; ++ ++ dev = &pdev->dev; ++ ++ mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL); ++ if (!mbox) ++ return -ENOMEM; ++ ++ dev_set_drvdata(&pdev->dev, mbox); ++ ++ rc = of_property_read_u32(dev->of_node, "reg", &mbox->base); ++ if (rc) { ++ dev_err(dev, "Couldn't read reg device tree property\n"); ++ return rc; ++ } ++ ++ mbox->model = of_device_get_match_data(dev); ++ if (IS_ERR(mbox->model)) { ++ dev_err(dev, "Couldn't get model data\n"); ++ return -ENODEV; ++ } ++ ++ mbox->map = syscon_node_to_regmap( ++ pdev->dev.parent->of_node); ++ if (IS_ERR(mbox->map)) { ++ dev_err(dev, "Couldn't get regmap\n"); ++ return -ENODEV; ++ } ++ ++ mutex_init(&mbox->mutex); ++ init_waitqueue_head(&mbox->queue); ++ ++ mbox->miscdev.minor = MISC_DYNAMIC_MINOR; ++ mbox->miscdev.name = DEVICE_NAME; ++ mbox->miscdev.fops = &aspeed_mbox_fops; ++ mbox->miscdev.parent = dev; ++ rc = misc_register(&mbox->miscdev); ++ if (rc) { ++ dev_err(dev, "Unable to register device\n"); ++ return rc; ++ } ++ ++ rc = aspeed_mbox_config_irq(mbox, pdev); ++ if (rc) { ++ dev_err(dev, "Failed to configure IRQ\n"); ++ misc_deregister(&mbox->miscdev); ++ return rc; ++ } ++ ++ return 0; ++} ++ ++static void aspeed_mbox_remove(struct platform_device *pdev) ++{ ++ struct aspeed_mbox *mbox = dev_get_drvdata(&pdev->dev); ++ ++ misc_deregister(&mbox->miscdev); ++} ++ ++static const struct aspeed_mbox_model ast2400_model = { ++ .dr_num = 16, ++ .dr = 0x0, ++ .str = 0x40, ++ .bcr = 0x48, ++ .hcr = 0x4c, ++ .bie = 0x50, ++ .hie = 0x58, ++}; ++ ++static const struct aspeed_mbox_model ast2600_model = { ++ .dr_num = 32, ++ .dr = 0x0, ++ .str = 0x80, ++ .bcr = 0x90, ++ .hcr = 0x94, ++ .bie = 0xa0, ++ .hie = 0xb0, ++}; ++ ++static const struct of_device_id aspeed_mbox_match[] = { ++ { .compatible = "aspeed,ast2400-mbox", ++ .data = &ast2400_model }, ++ { .compatible = "aspeed,ast2500-mbox", ++ .data = &ast2400_model }, ++ { .compatible = "aspeed,ast2600-mbox", ++ .data = &ast2600_model }, ++ { }, ++}; ++ ++static struct platform_driver aspeed_mbox_driver = { ++ .driver = { ++ .name = DEVICE_NAME, 
++ .of_match_table = aspeed_mbox_match, ++ }, ++ .probe = aspeed_mbox_probe, ++ .remove = aspeed_mbox_remove, ++}; ++ ++module_platform_driver(aspeed_mbox_driver); ++MODULE_DEVICE_TABLE(of, aspeed_mbox_match); ++MODULE_LICENSE("GPL"); ++MODULE_AUTHOR("Cyril Bur "); ++MODULE_AUTHOR("Chia-Wei Wang ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define DEVICE_NAME "aspeed-lpc-pcc" ++ ++static DEFINE_IDA(aspeed_pcc_ida); ++ ++#define HICR5 0x80 ++#define HICR5_EN_SNP0W BIT(0) ++#define HICR5_EN_SNP1W BIT(2) ++#define HICR6 0x084 ++#define HICR6_EN2BMODE BIT(19) ++#define SNPWADR 0x090 ++#define PCCR6 0x0c4 ++#define PCCR6_DMA_CUR_ADDR GENMASK(27, 0) ++#define PCCR4 0x0d0 ++#define PCCR4_DMA_ADDRL_MASK GENMASK(31, 0) ++#define PCCR4_DMA_ADDRL_SHIFT 0 ++#define PCCR5 0x0d4 ++#define PCCR5_DMA_ADDRH_MASK GENMASK(27, 24) ++#define PCCR5_DMA_ADDRH_SHIFT 24 ++#define PCCR5_DMA_LEN_MASK GENMASK(23, 0) ++#define PCCR5_DMA_LEN_SHIFT 0 ++#define HICRB 0x100 ++#define HICRB_ENSNP0D BIT(14) ++#define HICRB_ENSNP1D BIT(15) ++#define PCCR0 0x130 ++#define PCCR0_EN_DMA_INT BIT(31) ++#define PCCR0_EN_DMA_MODE BIT(14) ++#define PCCR0_ADDR_SEL_MASK GENMASK(13, 12) ++#define PCCR0_ADDR_SEL_SHIFT 12 ++#define PCCR0_RX_TRIG_LVL_MASK GENMASK(10, 8) ++#define PCCR0_RX_TRIG_LVL_SHIFT 8 ++#define PCCR0_CLR_RX_FIFO BIT(7) ++#define PCCR0_MODE_SEL_MASK GENMASK(5, 4) ++#define PCCR0_MODE_SEL_SHIFT 4 ++#define PCCR0_EN_RX_TMOUT_INT BIT(2) ++#define PCCR0_EN_RX_AVAIL_INT BIT(1) ++#define PCCR0_EN BIT(0) ++#define PCCR1 0x134 ++#define PCCR1_BASE_ADDR_MASK GENMASK(15, 0) ++#define PCCR1_BASE_ADDR_SHIFT 0 ++#define PCCR1_DONT_CARE_BITS_MASK GENMASK(21, 16) ++#define PCCR1_DONT_CARE_BITS_SHIFT 16 ++#define PCCR2 0x138 ++#define PCCR2_INT_STATUS_PATTERN_B BIT(16) ++#define PCCR2_INT_STATUS_PATTERN_A BIT(8) ++#define PCCR2_INT_STATUS_DMA_DONE BIT(4) ++#define PCCR2_INT_STATUS_DATA_RDY PCCR2_INT_STATUS_DMA_DONE ++#define PCCR2_INT_STATUS_RX_OVER BIT(3) ++#define PCCR2_INT_STATUS_RX_TMOUT BIT(2) ++#define PCCR2_INT_STATUS_RX_AVAIL BIT(1) ++#define PCCR3 0x13c ++#define PCCR3_FIFO_DATA_MASK GENMASK(7, 0) ++ ++#define PCC_DMA_BUFSZ (256 * SZ_1K) ++ ++enum pcc_fifo_threshold { ++ PCC_FIFO_THR_1_BYTE, ++ PCC_FIFO_THR_1_EIGHTH, ++ PCC_FIFO_THR_2_EIGHTH, ++ PCC_FIFO_THR_3_EIGHTH, ++ PCC_FIFO_THR_4_EIGHTH, ++ PCC_FIFO_THR_5_EIGHTH, ++ PCC_FIFO_THR_6_EIGHTH, ++ PCC_FIFO_THR_7_EIGHTH, ++ PCC_FIFO_THR_8_EIGHTH, ++}; ++ ++enum pcc_record_mode { ++ PCC_REC_1B, ++ PCC_REC_2B, ++ PCC_REC_4B, ++ PCC_REC_FULL, ++}; ++ ++enum pcc_port_hbits_select { ++ PCC_PORT_HBITS_SEL_NONE, ++ PCC_PORT_HBITS_SEL_45, ++ PCC_PORT_HBITS_SEL_67, ++ PCC_PORT_HBITS_SEL_89, ++}; ++ ++struct aspeed_pcc_dma { ++ uint32_t rptr; ++ uint8_t *virt; ++ dma_addr_t addr; ++ uint32_t size; ++}; ++ ++struct aspeed_pcc_ctrl { ++ struct device *dev; ++ struct regmap *regmap; ++ int irq; ++ uint32_t port; ++ struct aspeed_pcc_dma dma; ++ struct kfifo fifo; ++ wait_queue_head_t wq; ++ struct miscdevice mdev; ++ int mdev_id; ++}; ++ ++static inline bool is_valid_rec_mode(uint32_t mode) ++{ ++ return (mode > PCC_REC_FULL) ? false : true; ++} ++ ++static inline bool is_valid_high_bits_select(uint32_t sel) ++{ ++ return (sel > PCC_PORT_HBITS_SEL_89) ? 
false : true; ++} ++ ++static ssize_t aspeed_pcc_file_read(struct file *file, char __user *buffer, ++ size_t count, loff_t *ppos) ++{ ++ int rc; ++ unsigned int copied; ++ struct aspeed_pcc_ctrl *pcc = container_of(file->private_data, ++ struct aspeed_pcc_ctrl, ++ mdev); ++ ++ if (kfifo_is_empty(&pcc->fifo)) { ++ if (file->f_flags & O_NONBLOCK) ++ return -EAGAIN; ++ ++ rc = wait_event_interruptible(pcc->wq, ++ !kfifo_is_empty(&pcc->fifo)); ++ if (rc == -ERESTARTSYS) ++ return -EINTR; ++ } ++ ++ rc = kfifo_to_user(&pcc->fifo, buffer, count, &copied); ++ ++ return rc ? rc : copied; ++} ++ ++static __poll_t aspeed_pcc_file_poll(struct file *file, ++ struct poll_table_struct *pt) ++{ ++ struct aspeed_pcc_ctrl *pcc = container_of(file->private_data, ++ struct aspeed_pcc_ctrl, ++ mdev); ++ ++ poll_wait(file, &pcc->wq, pt); ++ ++ return !kfifo_is_empty(&pcc->fifo) ? POLLIN : 0; ++} ++ ++static const struct file_operations pcc_fops = { ++ .owner = THIS_MODULE, ++ .read = aspeed_pcc_file_read, ++ .poll = aspeed_pcc_file_poll, ++}; ++ ++static irqreturn_t aspeed_pcc_dma_isr(int irq, void *arg) ++{ ++ uint32_t reg, rptr, wptr; ++ struct aspeed_pcc_ctrl *pcc = (struct aspeed_pcc_ctrl *)arg; ++ struct kfifo *fifo = &pcc->fifo; ++ ++ regmap_write_bits(pcc->regmap, PCCR2, PCCR2_INT_STATUS_DMA_DONE, PCCR2_INT_STATUS_DMA_DONE); ++ ++ regmap_read(pcc->regmap, PCCR6, ®); ++ wptr = (reg & PCCR6_DMA_CUR_ADDR) - (pcc->dma.addr & PCCR6_DMA_CUR_ADDR); ++ rptr = pcc->dma.rptr; ++ ++ do { ++ if (kfifo_is_full(fifo)) ++ kfifo_skip(fifo); ++ ++ kfifo_put(fifo, pcc->dma.virt[rptr]); ++ ++ rptr = (rptr + 1) % pcc->dma.size; ++ } while (rptr != wptr); ++ ++ pcc->dma.rptr = rptr; ++ ++ wake_up_interruptible(&pcc->wq); ++ ++ return IRQ_HANDLED; ++} ++ ++static irqreturn_t aspeed_pcc_isr(int irq, void *arg) ++{ ++ uint32_t sts; ++ struct aspeed_pcc_ctrl *pcc = (struct aspeed_pcc_ctrl *)arg; ++ ++ regmap_read(pcc->regmap, PCCR2, &sts); ++ ++ if (!(sts & (PCCR2_INT_STATUS_RX_TMOUT | ++ PCCR2_INT_STATUS_RX_AVAIL | ++ PCCR2_INT_STATUS_DMA_DONE))) ++ return IRQ_NONE; ++ ++ return aspeed_pcc_dma_isr(irq, arg); ++} ++ ++/* ++ * A2600-15 AP note ++ * ++ * SW workaround to prevent generating Non-Fatal-Error (NFE) ++ * eSPI response when PCC is used for port I/O byte snooping ++ * over eSPI. ++ */ ++static int aspeed_a2600_15(struct aspeed_pcc_ctrl *pcc, struct device *dev) ++{ ++ u32 hicr5_en, hicrb_en; ++ ++ /* abort if snoop is enabled */ ++ regmap_read(pcc->regmap, HICR5, &hicr5_en); ++ if (hicr5_en & (HICR5_EN_SNP0W | HICR5_EN_SNP1W)) { ++ dev_err(dev, "A2600-15 should be applied with snoop disabled\n"); ++ return -EPERM; ++ } ++ ++ /* set SNPWADR of snoop device */ ++ regmap_write(pcc->regmap, SNPWADR, pcc->port | ((pcc->port + 2) << 16)); ++ ++ /* set HICRB[15:14]=11b to enable ACCEPT response for SNPWADR */ ++ hicrb_en = HICRB_ENSNP0D | HICRB_ENSNP1D; ++ regmap_update_bits(pcc->regmap, HICRB, hicrb_en, hicrb_en); ++ ++ /* set HICR6[19] to extend SNPWADR to 2x range */ ++ regmap_update_bits(pcc->regmap, HICR6, HICR6_EN2BMODE, HICR6_EN2BMODE); ++ ++ return 0; ++} ++ ++static int aspeed_pcc_enable(struct aspeed_pcc_ctrl *pcc, struct device *dev) ++{ ++ int rc; ++ ++ rc = aspeed_a2600_15(pcc, dev); ++ if (rc) ++ return rc; ++ ++ /* record mode: Set 2-Byte mode. 
*/ ++ regmap_update_bits(pcc->regmap, PCCR0, ++ PCCR0_MODE_SEL_MASK, ++ PCC_REC_2B << PCCR0_MODE_SEL_SHIFT); ++ ++ /* port address */ ++ regmap_update_bits(pcc->regmap, PCCR1, ++ PCCR1_BASE_ADDR_MASK, ++ pcc->port << PCCR1_BASE_ADDR_SHIFT); ++ ++ /* Set address high bits selection to 0b01 for address bit[5:4] */ ++ regmap_update_bits(pcc->regmap, PCCR0, ++ PCCR0_ADDR_SEL_MASK, ++ PCC_PORT_HBITS_SEL_45 << PCCR0_ADDR_SEL_SHIFT); ++ ++ /* Set LPC don't care address to 0x3 for port 80~83h */ ++ regmap_update_bits(pcc->regmap, PCCR1, ++ PCCR1_DONT_CARE_BITS_MASK, ++ 0x3 << PCCR1_DONT_CARE_BITS_SHIFT); ++ ++ /* set DMA ring buffer size and enable interrupts */ ++ regmap_write(pcc->regmap, PCCR4, pcc->dma.addr & 0xffffffff); ++#ifdef CONFIG_ARM64 ++ regmap_update_bits(pcc->regmap, PCCR5, PCCR5_DMA_ADDRH_MASK, ++ (pcc->dma.addr >> 32) << PCCR5_DMA_ADDRH_SHIFT); ++#endif ++ regmap_update_bits(pcc->regmap, PCCR5, PCCR5_DMA_LEN_MASK, ++ (pcc->dma.size / 4) << PCCR5_DMA_LEN_SHIFT); ++ regmap_update_bits(pcc->regmap, PCCR0, ++ PCCR0_EN_DMA_INT | PCCR0_EN_DMA_MODE, ++ PCCR0_EN_DMA_INT | PCCR0_EN_DMA_MODE); ++ ++ regmap_update_bits(pcc->regmap, PCCR0, PCCR0_EN, PCCR0_EN); ++ ++ return 0; ++} ++ ++static int aspeed_pcc_disable(struct aspeed_pcc_ctrl *pcc) ++{ ++ /* Disable PCC and DMA Mode for safety */ ++ regmap_update_bits(pcc->regmap, PCCR0, PCCR0_EN | PCCR0_EN_DMA_MODE, 0); ++ ++ /* Clear Rx FIFO. */ ++ regmap_update_bits(pcc->regmap, PCCR0, PCCR0_CLR_RX_FIFO, 1); ++ ++ /* Clear All interrupts status. */ ++ regmap_write(pcc->regmap, PCCR2, ++ PCCR2_INT_STATUS_RX_OVER | PCCR2_INT_STATUS_DMA_DONE | ++ PCCR2_INT_STATUS_PATTERN_A | PCCR2_INT_STATUS_PATTERN_B); ++ ++ return 0; ++} ++ ++static int aspeed_pcc_probe(struct platform_device *pdev) ++{ ++ int rc; ++ struct aspeed_pcc_ctrl *pcc; ++ struct device *dev = &pdev->dev; ++ uint32_t fifo_size = PAGE_SIZE; ++ ++ pcc = devm_kzalloc(dev, sizeof(*pcc), GFP_KERNEL); ++ if (!pcc) ++ return -ENOMEM; ++ ++ pcc->regmap = syscon_node_to_regmap(dev->parent->of_node); ++ if (IS_ERR(pcc->regmap)) ++ return dev_err_probe(dev, PTR_ERR(pcc->regmap), "Couldn't get regmap\n"); ++ ++ rc = of_property_read_u32(dev->of_node, "pcc-ports", &pcc->port); ++ if (rc) { ++ dev_err(dev, "no pcc ports configured\n"); ++ return rc; ++ } ++ ++ rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); ++ if (rc) { ++ dev_err(dev, "cannot set 64-bits DMA mask\n"); ++ return rc; ++ } ++ ++ pcc->dma.size = PCC_DMA_BUFSZ; ++ pcc->dma.virt = dmam_alloc_coherent(dev, ++ pcc->dma.size, ++ &pcc->dma.addr, ++ GFP_KERNEL); ++ if (!pcc->dma.virt) { ++ dev_err(dev, "cannot allocate DMA buffer\n"); ++ return -ENOMEM; ++ } ++ ++ fifo_size = roundup(pcc->dma.size, PAGE_SIZE); ++ rc = kfifo_alloc(&pcc->fifo, fifo_size, GFP_KERNEL); ++ if (rc) ++ return rc; ++ ++ /* Disable PCC to clean up DMA buffer before request IRQ. 
*/ ++ rc = aspeed_pcc_disable(pcc); ++ if (rc) { ++ dev_err(dev, "Couldn't disable PCC\n"); ++ goto err_free_kfifo; ++ } ++ ++ pcc->irq = platform_get_irq(pdev, 0); ++ if (pcc->irq < 0) { ++ rc = pcc->irq; ++ goto err_free_kfifo; ++ } ++ ++ rc = devm_request_irq(dev, pcc->irq, aspeed_pcc_isr, 0, DEVICE_NAME, pcc); ++ if (rc < 0) { ++ dev_err(dev, "Couldn't request IRQ %d\n", pcc->irq); ++ goto err_free_kfifo; ++ } ++ ++ init_waitqueue_head(&pcc->wq); ++ ++ pcc->mdev_id = ida_alloc(&aspeed_pcc_ida, GFP_KERNEL); ++ if (pcc->mdev_id < 0) { ++ dev_err(dev, "Couldn't allocate ID\n"); ++ goto err_free_kfifo; ++ } ++ ++ pcc->mdev.parent = dev; ++ pcc->mdev.minor = MISC_DYNAMIC_MINOR; ++ pcc->mdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s%d", DEVICE_NAME, ++ pcc->mdev_id); ++ pcc->mdev.fops = &pcc_fops; ++ rc = misc_register(&pcc->mdev); ++ if (rc) { ++ dev_err(dev, "Couldn't register misc device\n"); ++ goto err_free_ida; ++ } ++ ++ rc = aspeed_pcc_enable(pcc, dev); ++ if (rc) { ++ dev_err(dev, "Couldn't enable PCC\n"); ++ goto err_dereg_mdev; ++ } ++ ++ dev_set_drvdata(dev, pcc); ++ ++ return 0; ++ ++err_dereg_mdev: ++ misc_deregister(&pcc->mdev); ++ ++err_free_ida: ++ ida_free(&aspeed_pcc_ida, pcc->mdev_id); ++ ++err_free_kfifo: ++ kfifo_free(&pcc->fifo); ++ ++ return rc; ++} ++ ++static void aspeed_pcc_remove(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ struct aspeed_pcc_ctrl *pcc = dev_get_drvdata(dev); ++ ++ kfifo_free(&pcc->fifo); ++ ida_free(&aspeed_pcc_ida, pcc->mdev_id); ++ misc_deregister(&pcc->mdev); ++} ++ ++static const struct of_device_id aspeed_pcc_table[] = { ++ { .compatible = "aspeed,ast2600-lpc-pcc" }, ++ { }, ++}; ++ ++static struct platform_driver aspeed_pcc_driver = { ++ .driver = { ++ .name = "aspeed-pcc", ++ .of_match_table = aspeed_pcc_table, ++ }, ++ .probe = aspeed_pcc_probe, ++ .remove = aspeed_pcc_remove, ++}; ++ ++module_platform_driver(aspeed_pcc_driver); ++ ++MODULE_AUTHOR("Chia-Wei Wang "); ++MODULE_LICENSE("GPL"); ++MODULE_DESCRIPTION("Driver for Aspeed Post Code Capture"); +diff --git a/drivers/soc/aspeed/aspeed-lpc-snoop.c b/drivers/soc/aspeed/aspeed-lpc-snoop.c +--- a/drivers/soc/aspeed/aspeed-lpc-snoop.c 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/soc/aspeed/aspeed-lpc-snoop.c 2025-12-23 10:16:21.124032669 +0000 +@@ -11,7 +11,7 @@ + */ + + #include +-#include ++#include + #include + #include + #include +@@ -25,7 +25,6 @@ + + #define DEVICE_NAME "aspeed-lpc-snoop" + +-#define NUM_SNOOP_CHANNELS 2 + #define SNOOP_FIFO_SIZE 2048 + + #define HICR5 0x80 +@@ -36,6 +35,7 @@ + #define HICR6 0x84 + #define HICR6_STR_SNP0W BIT(0) + #define HICR6_STR_SNP1W BIT(1) ++#define HICR6_NFE_WA BIT(20) + #define SNPWADR 0x90 + #define SNPWADR_CH0_MASK GENMASK(15, 0) + #define SNPWADR_CH0_SHIFT 0 +@@ -57,7 +57,22 @@ + unsigned int has_hicrb_ensnp; + }; + ++enum aspeed_lpc_snoop_index { ++ ASPEED_LPC_SNOOP_INDEX_0 = 0, ++ ASPEED_LPC_SNOOP_INDEX_1 = 1, ++ ASPEED_LPC_SNOOP_INDEX_MAX = ASPEED_LPC_SNOOP_INDEX_1, ++}; ++ ++struct aspeed_lpc_snoop_channel_cfg { ++ enum aspeed_lpc_snoop_index index; ++ u32 hicr5_en; ++ u32 snpwadr_mask; ++ u32 snpwadr_shift; ++ u32 hicrb_en; ++}; ++ + struct aspeed_lpc_snoop_channel { ++ const struct aspeed_lpc_snoop_channel_cfg *cfg; + bool enabled; + struct kfifo fifo; + wait_queue_head_t wq; +@@ -67,10 +82,28 @@ + struct aspeed_lpc_snoop { + struct regmap *regmap; + int irq; +- struct clk *clk; +- struct aspeed_lpc_snoop_channel chan[NUM_SNOOP_CHANNELS]; ++ struct aspeed_lpc_snoop_channel 
chan[ASPEED_LPC_SNOOP_INDEX_MAX + 1]; + }; + ++static const struct aspeed_lpc_snoop_channel_cfg channel_cfgs[ASPEED_LPC_SNOOP_INDEX_MAX + 1] = { ++ { ++ .index = ASPEED_LPC_SNOOP_INDEX_0, ++ .hicr5_en = HICR5_EN_SNP0W | HICR5_ENINT_SNP0W, ++ .snpwadr_mask = SNPWADR_CH0_MASK, ++ .snpwadr_shift = SNPWADR_CH0_SHIFT, ++ .hicrb_en = HICRB_ENSNP0D, ++ }, ++ { ++ .index = ASPEED_LPC_SNOOP_INDEX_1, ++ .hicr5_en = HICR5_EN_SNP1W | HICR5_ENINT_SNP1W, ++ .snpwadr_mask = SNPWADR_CH1_MASK, ++ .snpwadr_shift = SNPWADR_CH1_SHIFT, ++ .hicrb_en = HICRB_ENSNP1D, ++ }, ++}; ++ ++static DEFINE_IDA(aspeed_lpc_snoop_ida); ++ + static struct aspeed_lpc_snoop_channel *snoop_file_to_chan(struct file *file) + { + return container_of(file->private_data, +@@ -136,12 +169,19 @@ + return IRQ_NONE; + + /* Check if one of the snoop channels is interrupting */ +- reg &= (HICR6_STR_SNP0W | HICR6_STR_SNP1W); +- if (!reg) ++ if (!(reg & (HICR6_STR_SNP0W | HICR6_STR_SNP1W))) + return IRQ_NONE; + +- /* Ack pending IRQs */ +- regmap_write(lpc_snoop->regmap, HICR6, reg); ++ /* Check if NFE WA is set */ ++ if (reg & HICR6_NFE_WA) { ++ /* Ack pending IRQs with keeping NFE WA */ ++ regmap_write(lpc_snoop->regmap, HICR6, ++ (HICR6_STR_SNP0W | HICR6_STR_SNP1W | HICR6_NFE_WA)); ++ } else { ++ /* Ack pending IRQs */ ++ regmap_write(lpc_snoop->regmap, HICR6, ++ (HICR6_STR_SNP0W | HICR6_STR_SNP1W)); ++ } + + /* Read and save most recent snoop'ed data byte to FIFO */ + regmap_read(lpc_snoop->regmap, SNPWDR, &data); +@@ -182,108 +222,92 @@ + return 0; + } + +-static int aspeed_lpc_enable_snoop(struct aspeed_lpc_snoop *lpc_snoop, +- struct device *dev, +- int channel, u16 lpc_port) ++__attribute__((nonnull)) ++static int aspeed_lpc_enable_snoop(struct device *dev, ++ struct aspeed_lpc_snoop *lpc_snoop, ++ struct aspeed_lpc_snoop_channel *channel, ++ const struct aspeed_lpc_snoop_channel_cfg *cfg, ++ u16 lpc_port) + { +- int rc = 0; +- u32 hicr5_en, snpwadr_mask, snpwadr_shift, hicrb_en; +- const struct aspeed_lpc_snoop_model_data *model_data = +- of_device_get_match_data(dev); ++ const struct aspeed_lpc_snoop_model_data *model_data; ++ int rc = 0, id; + +- if (WARN_ON(lpc_snoop->chan[channel].enabled)) ++ if (WARN_ON(channel->enabled)) + return -EBUSY; + +- init_waitqueue_head(&lpc_snoop->chan[channel].wq); +- /* Create FIFO datastructure */ +- rc = kfifo_alloc(&lpc_snoop->chan[channel].fifo, +- SNOOP_FIFO_SIZE, GFP_KERNEL); ++ init_waitqueue_head(&channel->wq); ++ ++ channel->cfg = cfg; ++ channel->miscdev.minor = MISC_DYNAMIC_MINOR; ++ channel->miscdev.fops = &snoop_fops; ++ channel->miscdev.parent = dev; ++ ++ id = ida_alloc(&aspeed_lpc_snoop_ida, GFP_KERNEL); ++ if (id < 0) ++ return id; ++ ++ channel->miscdev.name = ++ devm_kasprintf(dev, GFP_KERNEL, "%s%d", DEVICE_NAME, id); ++ if (!channel->miscdev.name) ++ return -ENOMEM; ++ ++ rc = kfifo_alloc(&channel->fifo, SNOOP_FIFO_SIZE, GFP_KERNEL); + if (rc) + return rc; + +- lpc_snoop->chan[channel].miscdev.minor = MISC_DYNAMIC_MINOR; +- lpc_snoop->chan[channel].miscdev.name = +- devm_kasprintf(dev, GFP_KERNEL, "%s%d", DEVICE_NAME, channel); +- if (!lpc_snoop->chan[channel].miscdev.name) { +- rc = -ENOMEM; +- goto err_free_fifo; +- } +- lpc_snoop->chan[channel].miscdev.fops = &snoop_fops; +- lpc_snoop->chan[channel].miscdev.parent = dev; +- rc = misc_register(&lpc_snoop->chan[channel].miscdev); ++ rc = misc_register(&channel->miscdev); + if (rc) + goto err_free_fifo; + + /* Enable LPC snoop channel at requested port */ +- switch (channel) { +- case 0: +- hicr5_en = HICR5_EN_SNP0W | 
HICR5_ENINT_SNP0W; +- snpwadr_mask = SNPWADR_CH0_MASK; +- snpwadr_shift = SNPWADR_CH0_SHIFT; +- hicrb_en = HICRB_ENSNP0D; +- break; +- case 1: +- hicr5_en = HICR5_EN_SNP1W | HICR5_ENINT_SNP1W; +- snpwadr_mask = SNPWADR_CH1_MASK; +- snpwadr_shift = SNPWADR_CH1_SHIFT; +- hicrb_en = HICRB_ENSNP1D; +- break; +- default: +- rc = -EINVAL; +- goto err_misc_deregister; +- } +- +- regmap_update_bits(lpc_snoop->regmap, HICR5, hicr5_en, hicr5_en); +- regmap_update_bits(lpc_snoop->regmap, SNPWADR, snpwadr_mask, +- lpc_port << snpwadr_shift); +- if (model_data->has_hicrb_ensnp) +- regmap_update_bits(lpc_snoop->regmap, HICRB, +- hicrb_en, hicrb_en); ++ regmap_set_bits(lpc_snoop->regmap, HICR5, cfg->hicr5_en); ++ regmap_update_bits(lpc_snoop->regmap, SNPWADR, cfg->snpwadr_mask, ++ lpc_port << cfg->snpwadr_shift); ++ ++ model_data = of_device_get_match_data(dev); ++ if (model_data && model_data->has_hicrb_ensnp) ++ regmap_set_bits(lpc_snoop->regmap, HICRB, cfg->hicrb_en); + +- lpc_snoop->chan[channel].enabled = true; ++ channel->enabled = true; + + return 0; + +-err_misc_deregister: +- misc_deregister(&lpc_snoop->chan[channel].miscdev); + err_free_fifo: +- kfifo_free(&lpc_snoop->chan[channel].fifo); ++ kfifo_free(&channel->fifo); + return rc; + } + ++__attribute__((nonnull)) + static void aspeed_lpc_disable_snoop(struct aspeed_lpc_snoop *lpc_snoop, +- int channel) ++ struct aspeed_lpc_snoop_channel *channel) + { +- if (!lpc_snoop->chan[channel].enabled) ++ if (!channel->enabled) + return; + +- switch (channel) { +- case 0: +- regmap_update_bits(lpc_snoop->regmap, HICR5, +- HICR5_EN_SNP0W | HICR5_ENINT_SNP0W, +- 0); +- break; +- case 1: +- regmap_update_bits(lpc_snoop->regmap, HICR5, +- HICR5_EN_SNP1W | HICR5_ENINT_SNP1W, +- 0); +- break; +- default: +- return; +- } ++ /* Disable interrupts along with the device */ ++ regmap_clear_bits(lpc_snoop->regmap, HICR5, channel->cfg->hicr5_en); + +- lpc_snoop->chan[channel].enabled = false; ++ channel->enabled = false; + /* Consider improving safety wrt concurrent reader(s) */ +- misc_deregister(&lpc_snoop->chan[channel].miscdev); +- kfifo_free(&lpc_snoop->chan[channel].fifo); ++ misc_deregister(&channel->miscdev); ++ kfifo_free(&channel->fifo); ++} ++ ++static void aspeed_lpc_snoop_remove(struct platform_device *pdev) ++{ ++ struct aspeed_lpc_snoop *lpc_snoop = dev_get_drvdata(&pdev->dev); ++ ++ /* Disable both snoop channels */ ++ aspeed_lpc_disable_snoop(lpc_snoop, &lpc_snoop->chan[0]); ++ aspeed_lpc_disable_snoop(lpc_snoop, &lpc_snoop->chan[1]); + } + + static int aspeed_lpc_snoop_probe(struct platform_device *pdev) + { + struct aspeed_lpc_snoop *lpc_snoop; +- struct device *dev; + struct device_node *np; +- u32 port; ++ struct device *dev; ++ int idx; + int rc; + + dev = &pdev->dev; +@@ -293,77 +317,40 @@ + return -ENOMEM; + + np = pdev->dev.parent->of_node; +- if (!of_device_is_compatible(np, "aspeed,ast2400-lpc-v2") && +- !of_device_is_compatible(np, "aspeed,ast2500-lpc-v2") && +- !of_device_is_compatible(np, "aspeed,ast2600-lpc-v2")) { +- dev_err(dev, "unsupported LPC device binding\n"); +- return -ENODEV; +- } + + lpc_snoop->regmap = syscon_node_to_regmap(np); +- if (IS_ERR(lpc_snoop->regmap)) { +- dev_err(dev, "Couldn't get regmap\n"); +- return -ENODEV; +- } ++ if (IS_ERR(lpc_snoop->regmap)) ++ return dev_err_probe(dev, PTR_ERR(lpc_snoop->regmap), "Couldn't get regmap\n"); + + dev_set_drvdata(&pdev->dev, lpc_snoop); + +- rc = of_property_read_u32_index(dev->of_node, "snoop-ports", 0, &port); +- if (rc) { +- dev_err(dev, "no snoop ports configured\n"); +- 
return -ENODEV; +- } +- +- lpc_snoop->clk = devm_clk_get(dev, NULL); +- if (IS_ERR(lpc_snoop->clk)) { +- rc = PTR_ERR(lpc_snoop->clk); +- if (rc != -EPROBE_DEFER) +- dev_err(dev, "couldn't get clock\n"); +- return rc; +- } +- rc = clk_prepare_enable(lpc_snoop->clk); +- if (rc) { +- dev_err(dev, "couldn't enable clock\n"); +- return rc; +- } +- + rc = aspeed_lpc_snoop_config_irq(lpc_snoop, pdev); + if (rc) +- goto err; ++ return rc; + +- rc = aspeed_lpc_enable_snoop(lpc_snoop, dev, 0, port); +- if (rc) +- goto err; ++ static_assert(ARRAY_SIZE(channel_cfgs) == ARRAY_SIZE(lpc_snoop->chan), ++ "Broken implementation assumption regarding cfg count"); ++ for (idx = ASPEED_LPC_SNOOP_INDEX_0; idx <= ASPEED_LPC_SNOOP_INDEX_MAX; idx++) { ++ u32 port; ++ ++ rc = of_property_read_u32_index(dev->of_node, "snoop-ports", idx, &port); ++ if (rc) ++ break; + +- /* Configuration of 2nd snoop channel port is optional */ +- if (of_property_read_u32_index(dev->of_node, "snoop-ports", +- 1, &port) == 0) { +- rc = aspeed_lpc_enable_snoop(lpc_snoop, dev, 1, port); +- if (rc) { +- aspeed_lpc_disable_snoop(lpc_snoop, 0); +- goto err; +- } ++ rc = aspeed_lpc_enable_snoop(dev, lpc_snoop, &lpc_snoop->chan[idx], ++ &channel_cfgs[idx], port); ++ if (rc) ++ goto cleanup_channels; + } + +- return 0; ++ return idx == ASPEED_LPC_SNOOP_INDEX_0 ? -ENODEV : 0; + +-err: +- clk_disable_unprepare(lpc_snoop->clk); ++cleanup_channels: ++ aspeed_lpc_snoop_remove(pdev); + + return rc; + } + +-static void aspeed_lpc_snoop_remove(struct platform_device *pdev) +-{ +- struct aspeed_lpc_snoop *lpc_snoop = dev_get_drvdata(&pdev->dev); +- +- /* Disable both snoop channels */ +- aspeed_lpc_disable_snoop(lpc_snoop, 0); +- aspeed_lpc_disable_snoop(lpc_snoop, 1); +- +- clk_disable_unprepare(lpc_snoop->clk); +-} +- + static const struct aspeed_lpc_snoop_model_data ast2400_model_data = { + .has_hicrb_ensnp = 0, + }; +@@ -388,7 +375,7 @@ + .of_match_table = aspeed_lpc_snoop_match, + }, + .probe = aspeed_lpc_snoop_probe, +- .remove_new = aspeed_lpc_snoop_remove, ++ .remove = aspeed_lpc_snoop_remove, + }; + + module_platform_driver(aspeed_lpc_snoop_driver); +diff --git a/drivers/soc/aspeed/aspeed-mbox.c b/drivers/soc/aspeed/aspeed-mbox.c +--- a/drivers/soc/aspeed/aspeed-mbox.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/soc/aspeed/aspeed-mbox.c 2025-12-23 10:16:21.124032669 +0000 +@@ -0,0 +1,312 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++/* ++ * Copyright Aspeed Technology Inc. (C) 2025. All rights reserved ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define __ASPEED_MBOX_IOCTL_MAGIC 'X' ++#define ASPEED_MBOX_IOCTL_CAPS _IOR(__ASPEED_MBOX_IOCTL_MAGIC, 0, uint32_t[4]) ++#define ASPEED_MBOX_IOCTL_SEND _IOW(__ASPEED_MBOX_IOCTL_MAGIC, 1, uint32_t[8]) ++#define ASPEED_MBOX_IOCTL_RECV _IOR(__ASPEED_MBOX_IOCTL_MAGIC, 2, uint32_t[8]) ++ ++/** ++ * struct aspeed_mem - Description of a ASPEED memory buffer ++ * @buf: Shared memory base address ++ * @size: Shared memory byte size ++ */ ++struct aspeed_mem { ++ void __iomem *buf; ++ phys_addr_t phys_addr; ++ resource_size_t size; ++}; ++ ++/* ++ * Message prototype of IPC. It should be defined per your usage. 
++ * cmd: type of message ++ * len: length of message ++ */ ++struct ast_mbox_msg { ++ u32 cmd; ++ u32 len; ++}; ++ ++/* ++ * struct ast_mbox_chan - Description of a ASPEED mailbox channel ++ * @cl: Mailbox client ++ * @mdev: Misc device ++ * @chan: Mailbox channel ++ * @tx_base: Information of tx shmem ++ * @rx_base: Information of rx shmem ++ * @rx_buffer: buffer to store data on callback ++ * @rx_wait: Wait queue for receiving messages ++ * @rx_msg_lock: Spinlock to protect rx_buffer ++ */ ++struct ast_mbox_info { ++ struct device *dev; ++ struct mbox_client cl; ++ struct miscdevice mdev; ++ struct mbox_chan *chan; ++ struct aspeed_mem tx_base; ++ struct aspeed_mem rx_base; ++ char *rx_buffer; ++ wait_queue_head_t rx_wait; ++ spinlock_t rx_msg_lock; /* spinlock to protect rx_buffer */ ++}; ++ ++static ssize_t mbox_read(struct file *fp, char __user *buf, size_t nbytes, loff_t *off) ++{ ++ struct ast_mbox_info *info = container_of(fp->private_data, struct ast_mbox_info, mdev); ++ int ret; ++ ++ if (nbytes == 0) ++ return 0; ++ ++ if (!info->rx_base.buf) { ++ dev_err(info->dev, "No RX shmem\n"); ++ return -EINVAL; ++ } ++ ++ if (nbytes > info->rx_base.size) { ++ dev_warn(info->dev, "Read size %zu exceeds RX shmem size %llu\n", ++ nbytes, info->rx_base.size); ++ nbytes = info->rx_base.size; ++ } ++ ++ ret = copy_to_user((void __user *)buf, info->rx_base.buf, nbytes); ++ if (ret) ++ return -EFAULT; ++ ++ return nbytes; ++} ++ ++static ssize_t mbox_write(struct file *fp, const char *buf, size_t nbytes, loff_t *off) ++{ ++ struct ast_mbox_info *info = container_of(fp->private_data, struct ast_mbox_info, mdev); ++ int ret; ++ ++ if (nbytes == 0) ++ return 0; ++ ++ if (!info->tx_base.buf) { ++ dev_err(info->dev, "No TX shmem\n"); ++ return -EINVAL; ++ } ++ ++ if (nbytes > info->tx_base.size) { ++ dev_warn(info->dev, "Write size %zu exceeds TX shmem size %llu\n", ++ nbytes, info->tx_base.size); ++ nbytes = info->tx_base.size; ++ } ++ ++ ret = copy_from_user(info->tx_base.buf, (void __user *)buf, nbytes); ++ if (ret) ++ return -EFAULT; ++ ++ return nbytes; ++} ++ ++static __poll_t mbox_poll(struct file *fp, struct poll_table_struct *wait) ++{ ++ struct ast_mbox_info *info = container_of(fp->private_data, struct ast_mbox_info, mdev); ++ __poll_t mask = 0; ++ ++ poll_wait(fp, &info->rx_wait, wait); ++ ++ if (info->rx_buffer) ++ mask |= POLLIN | POLLRDNORM; ++ ++ return mask; ++} ++ ++static long mbox_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) ++{ ++ struct ast_mbox_info *info = container_of(fp->private_data, struct ast_mbox_info, mdev); ++ u32 *data; ++ u32 msg[8]; ++ unsigned long flags; ++ int ret = 0; ++ ++ switch (cmd) { ++ case ASPEED_MBOX_IOCTL_CAPS: ++ data = (u32 *)arg; ++ data[0] = (info->tx_base.phys_addr) >> 8; ++ data[1] = info->tx_base.size; ++ data[2] = (info->rx_base.phys_addr) >> 8; ++ data[3] = info->rx_base.size; ++ break; ++ case ASPEED_MBOX_IOCTL_SEND: ++ ret = copy_from_user(msg, (void __user *)arg, sizeof(msg)); ++ if (ret) { ++ dev_dbg(info->dev, "Failed to copy message from user\n"); ++ return -EFAULT; ++ } ++ ++ ret = mbox_send_message(info->chan, msg); ++ if (ret < 0) ++ dev_dbg(info->dev, "Failed to send message via mailbox\n"); ++ break; ++ case ASPEED_MBOX_IOCTL_RECV: ++ spin_lock_irqsave(&info->rx_msg_lock, flags); ++ if (!info->rx_buffer) { ++ spin_unlock_irqrestore(&info->rx_msg_lock, flags); ++ dev_dbg(info->dev, "No message received\n"); ++ return -EAGAIN; ++ } ++ ret = copy_to_user((void __user *)arg, info->rx_buffer, sizeof(msg)); ++ 
info->rx_buffer = NULL; ++ spin_unlock_irqrestore(&info->rx_msg_lock, flags); ++ if (ret) { ++ dev_dbg(info->dev, "Failed to copy message to user\n"); ++ return -EFAULT; ++ } ++ break; ++ default: ++ dev_err(info->dev, "Unsupported cmd: %x\n", cmd); ++ ret = -EINVAL; ++ } ++ ++ return ret; ++} ++ ++static const struct file_operations aspeed_mbox_fops = { ++ .owner = THIS_MODULE, ++ .read = mbox_read, ++ .write = mbox_write, ++ .poll = mbox_poll, ++ .unlocked_ioctl = mbox_ioctl, ++}; ++ ++static void mbox_rx_callback(struct mbox_client *client, void *message) ++{ ++ struct ast_mbox_info *info = container_of(client, struct ast_mbox_info, cl); ++ unsigned long flags; ++ ++ spin_lock_irqsave(&info->rx_msg_lock, flags); ++ info->rx_buffer = message; ++ spin_unlock_irqrestore(&info->rx_msg_lock, flags); ++ ++ wake_up_interruptible(&info->rx_wait); ++} ++ ++static int aspeed_mbox_probe(struct platform_device *pdev) ++{ ++ struct ast_mbox_info *info; ++ struct resource *res; ++ struct device *dev = &pdev->dev; ++ int ret; ++ u32 tx_tout = -1; ++ ++ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL); ++ if (!info) ++ return -ENOMEM; ++ ++ info->tx_base.buf = devm_platform_get_and_ioremap_resource(pdev, 0, &res); ++ if (PTR_ERR(info->tx_base.buf) == -EBUSY) { ++ /* if reserved area in SRAM, try just ioremap */ ++ info->tx_base.size = resource_size(res); ++ info->tx_base.buf = devm_ioremap(dev, res->start, info->tx_base.size); ++ } ++ if (IS_ERR(info->tx_base.buf)) { ++ info->tx_base.buf = NULL; ++ } else { ++ info->tx_base.phys_addr = res->start; ++ info->tx_base.size = resource_size(res); ++ } ++ ++ info->rx_base.buf = devm_platform_get_and_ioremap_resource(pdev, 1, &res); ++ if (PTR_ERR(info->rx_base.buf) == -EBUSY) { ++ /* if reserved area in SRAM, try just ioremap */ ++ info->rx_base.size = resource_size(res); ++ info->rx_base.buf = devm_ioremap(dev, res->start, info->rx_base.size); ++ } ++ if (IS_ERR(info->rx_base.buf)) { ++ info->rx_base = info->tx_base; ++ } else { ++ info->rx_base.phys_addr = res->start; ++ info->rx_base.size = resource_size(res); ++ } ++ ++ if (device_property_read_u32(dev, "aspeed,tx-timeout", &tx_tout)) ++ tx_tout = -1; ++ ++ dev_info(dev, "TX shmem: phys 0x%pa size %llu\n", ++ &info->tx_base.phys_addr, info->tx_base.size); ++ dev_info(dev, "RX shmem: phys 0x%pa size %llu\n", ++ &info->rx_base.phys_addr, info->rx_base.size); ++ dev_info(dev, "TX timeout: %u ms\n", tx_tout); ++ ++ info->cl.dev = dev; ++ info->cl.rx_callback = mbox_rx_callback; ++ info->cl.tx_block = true; ++ info->cl.knows_txdone = false; ++ info->cl.tx_tout = tx_tout; ++ ++ info->chan = mbox_request_channel(&info->cl, 0); ++ if (IS_ERR(info->chan)) { ++ dev_err(dev, "failed to request channel err %ld\n", ++ PTR_ERR(info->chan)); ++ return -EPROBE_DEFER; ++ } ++ ++ info->rx_buffer = NULL; ++ init_waitqueue_head(&info->rx_wait); ++ spin_lock_init(&info->rx_msg_lock); ++ info->dev = dev; ++ platform_set_drvdata(pdev, info); ++ ++ info->mdev.parent = dev; ++ info->mdev.minor = MISC_DYNAMIC_MINOR; ++ info->mdev.name = dev_name(dev); ++ info->mdev.fops = &aspeed_mbox_fops; ++ ret = misc_register(&info->mdev); ++ if (ret) { ++ dev_err(dev, "failed to register misc device\n"); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static void aspeed_mbox_remove(struct platform_device *pdev) ++{ ++ struct ast_mbox_info *info = platform_get_drvdata(pdev); ++ ++ mbox_free_channel(info->chan); ++ misc_deregister(&info->mdev); ++} ++ ++static const struct of_device_id mbox_cl_match[] = { ++ { .compatible = "aspeed,aspeed-mbox" 
 },
++	{},
++};
++MODULE_DEVICE_TABLE(of, mbox_cl_match);
++
++static struct platform_driver aspeed_mbox_driver = {
++	.driver = {
++		.name = "aspeed_mbox_client",
++		.of_match_table = mbox_cl_match,
++	},
++	.probe = aspeed_mbox_probe,
++	.remove = aspeed_mbox_remove,
++};
++module_platform_driver(aspeed_mbox_driver);
++
++MODULE_AUTHOR("Jammy Huang ");
++MODULE_DESCRIPTION("ASPEED MBOX CLIENT driver");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/soc/aspeed/aspeed-mctp.c b/drivers/soc/aspeed/aspeed-mctp.c
+--- a/drivers/soc/aspeed/aspeed-mctp.c 1970-01-01 00:00:00.000000000 +0000
++++ b/drivers/soc/aspeed/aspeed-mctp.c 2025-12-23 10:16:21.124032669 +0000
+@@ -0,0 +1,2658 @@
++// SPDX-License-Identifier: GPL-2.0
++// Copyright (c) 2020, Intel Corporation.
++
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++
++#include
++
++/* AST2600 MCTP Controller registers */
++#define ASPEED_MCTP_CTRL 0x000
++#define TX_CMD_TRIGGER BIT(0)
++#define RX_CMD_READY BIT(4)
++#define MATCHING_EID BIT(9)
++
++#define ASPEED_MCTP_TX_CMD 0x004
++#define ASPEED_MCTP_RX_CMD 0x008
++
++#define ASPEED_MCTP_INT_STS 0x00c
++#define ASPEED_MCTP_INT_EN 0x010
++#define TX_CMD_SENT_INT BIT(0)
++#define TX_CMD_LAST_INT BIT(1)
++#define TX_CMD_WRONG_INT BIT(2)
++#define RX_CMD_RECEIVE_INT BIT(8)
++#define RX_CMD_NO_MORE_INT BIT(9)
++
++#define ASPEED_MCTP_EID 0x014
++#define MEMORY_SPACE_MAPPING GENMASK(31, 28)
++#define MCTP_EID GENMASK(7, 0)
++#define ASPEED_MCTP_OBFF_CTRL 0x018
++
++#define ASPEED_MCTP_ENGINE_CTRL 0x01c
++#define TX_MAX_PAYLOAD_SIZE_SHIFT 0
++#define TX_MAX_PAYLOAD_SIZE_MASK GENMASK(1, TX_MAX_PAYLOAD_SIZE_SHIFT)
++#define TX_MAX_PAYLOAD_SIZE(x) \
++	(((x) << TX_MAX_PAYLOAD_SIZE_SHIFT) & TX_MAX_PAYLOAD_SIZE_MASK)
++#define RX_MAX_PAYLOAD_SIZE_SHIFT 4
++#define RX_MAX_PAYLOAD_SIZE_MASK GENMASK(5, RX_MAX_PAYLOAD_SIZE_SHIFT)
++#define RX_MAX_PAYLOAD_SIZE(x) \
++	(((x) << RX_MAX_PAYLOAD_SIZE_SHIFT) & RX_MAX_PAYLOAD_SIZE_MASK)
++#define FIFO_LAYOUT_SHIFT 8
++#define FIFO_LAYOUT_MASK GENMASK(9, FIFO_LAYOUT_SHIFT)
++#define FIFO_LAYOUT(x) \
++	(((x) << FIFO_LAYOUT_SHIFT) & FIFO_LAYOUT_MASK)
++
++#define ASPEED_MCTP_RX_BUF_ADDR 0x08
++#define ASPEED_MCTP_RX_BUF_HI_ADDR 0x020
++#define ASPEED_MCTP_RX_BUF_SIZE 0x024
++#define ASPEED_MCTP_RX_BUF_RD_PTR 0x028
++#define UPDATE_RX_RD_PTR BIT(31)
++#define RX_BUF_RD_PTR_MASK GENMASK(11, 0)
++#define ASPEED_MCTP_RX_BUF_WR_PTR 0x02c
++#define RX_BUF_WR_PTR_MASK GENMASK(11, 0)
++
++#define ASPEED_MCTP_TX_BUF_ADDR 0x04
++#define ASPEED_MCTP_TX_BUF_HI_ADDR 0x030
++#define ASPEED_MCTP_TX_BUF_SIZE 0x034
++#define ASPEED_MCTP_TX_BUF_RD_PTR 0x038
++#define UPDATE_TX_RD_PTR BIT(31)
++#define TX_BUF_RD_PTR_MASK GENMASK(11, 0)
++#define ASPEED_MCTP_TX_BUF_WR_PTR 0x03c
++#define TX_BUF_WR_PTR_MASK GENMASK(11, 0)
++#define ASPEED_G7_MCTP_PCIE_BDF 0x04c
++
++#define ADDR_LEN GENMASK(26, 0)
++#define DATA_ADDR(x) (((x) >> 4) & ADDR_LEN)
++
++/* TX command */
++#define TX_LAST_CMD BIT(31)
++#define TX_DATA_ADDR_SHIFT 4
++#define TX_DATA_ADDR_MASK GENMASK(30, TX_DATA_ADDR_SHIFT)
++#define TX_DATA_ADDR(x) \
++	((DATA_ADDR(x) << TX_DATA_ADDR_SHIFT) & TX_DATA_ADDR_MASK)
++#define TX_RESERVED_1_MASK GENMASK(1, 0) /* must be 1 */
++#define TX_RESERVED_1 1
++#define TX_STOP_AFTER_CMD BIT(16)
++#define TX_INTERRUPT_AFTER_CMD BIT(15)
++#define 
TX_PACKET_SIZE_SHIFT 2 ++#define TX_PACKET_SIZE_MASK GENMASK(12, TX_PACKET_SIZE_SHIFT) ++#define TX_PACKET_SIZE(x) \ ++ (((x) << TX_PACKET_SIZE_SHIFT) & TX_PACKET_SIZE_MASK) ++#define TX_RESERVED_0_MASK GENMASK(1, 0) /* MBZ */ ++#define TX_RESERVED_0 0 ++ ++/* RX command */ ++#define RX_INTERRUPT_AFTER_CMD BIT(2) ++#define RX_DATA_ADDR_SHIFT 4 ++#define RX_DATA_ADDR_MASK GENMASK(30, RX_DATA_ADDR_SHIFT) ++#define RX_DATA_ADDR(x) \ ++ ((DATA_ADDR(x) << RX_DATA_ADDR_SHIFT) & RX_DATA_ADDR_MASK) ++ ++#define ADDR_LEN_2500 GENMASK(23, 0) ++#define DATA_ADDR_2500(x) (((x) >> 7) & ADDR_LEN_2500) ++ ++/* TX command for ast2500 */ ++#define TX_DATA_ADDR_MASK_2500 GENMASK(30, 8) ++#define TX_DATA_ADDR_2500(x) \ ++ FIELD_PREP(TX_DATA_ADDR_MASK_2500, DATA_ADDR_2500(x)) ++#define TX_PACKET_SIZE_2500(x) \ ++ FIELD_PREP(GENMASK(11, 2), x) ++#define TX_PACKET_DEST_EID GENMASK(7, 0) ++#define TX_PACKET_TARGET_ID GENMASK(31, 16) ++#define TX_PACKET_ROUTING_TYPE BIT(14) ++#define TX_PACKET_TAG_OWNER BIT(13) ++#define TX_PACKET_PADDING_LEN GENMASK(1, 0) ++ ++/* Rx command for ast2500 */ ++#define RX_LAST_CMD BIT(31) ++#define RX_DATA_ADDR_MASK_2500 GENMASK(29, 7) ++#define RX_DATA_ADDR_2500(x) \ ++ FIELD_PREP(RX_DATA_ADDR_MASK_2500, DATA_ADDR_2500(x)) ++#define RX_PACKET_SIZE GENMASK(30, 24) ++#define RX_PACKET_SRC_EID GENMASK(23, 16) ++#define RX_PACKET_ROUTING_TYPE GENMASK(15, 14) ++#define RX_PACKET_TAG_OWNER BIT(13) ++#define RX_PACKET_SEQ_NUMBER GENMASK(12, 11) ++#define RX_PACKET_MSG_TAG GENMASK(10, 8) ++#define RX_PACKET_SOM BIT(7) ++#define RX_PACKET_EOM BIT(6) ++#define RX_PACKET_PADDING_LEN GENMASK(5, 4) ++ ++/* HW buffer sizes */ ++#define TX_PACKET_COUNT 48 ++#define RX_PACKET_COUNT 96 ++#if (RX_PACKET_COUNT % 4 != 0) ++#error The Rx buffer size should be 4-aligned. ++#error 1.Make runaway wrap boundary can be determined in Ast2600 A1/A2. ++#error 2.Fix the runaway read pointer bug in Ast2600 A3. 
++#endif ++#define TX_MAX_PACKET_COUNT (TX_BUF_RD_PTR_MASK + 1) ++#define RX_MAX_PACKET_COUNT (RX_BUF_RD_PTR_MASK + 1) ++ ++/* Per client packet cache sizes */ ++#define RX_RING_COUNT 64 ++#define TX_RING_COUNT 64 ++ ++/* PCIe Host Controller registers */ ++#define ASPEED_PCIE_LINK 0x0c0 ++#define PCIE_LINK_STS BIT(5) ++#define ASPEED_PCIE_MISC_STS_1 0x0c4 ++ ++/* PCIe Host Controller registers */ ++#define ASPEED_G7_PCIE_LOCATE 0x300 ++#define PCIE_LOCATE_IO BIT(0) ++#define ASPEED_G7_PCIE_LINK 0x358 ++#define PCIE_G7_LINK_STS BIT(8) ++#define ASPEED_G7_IO_PCIE_LINK 0x344 ++#define PCIE_G7_IO_LINK_STS BIT(18) ++ ++/* PCI address definitions */ ++#define PCI_DEV_NUM_MASK GENMASK(4, 0) ++#define PCI_BUS_NUM_SHIFT 5 ++#define PCI_BUS_NUM_MASK GENMASK(12, PCI_BUS_NUM_SHIFT) ++#define GET_PCI_DEV_NUM(x) ((x) & PCI_DEV_NUM_MASK) ++#define GET_PCI_BUS_NUM(x) (((x) & PCI_BUS_NUM_MASK) >> PCI_BUS_NUM_SHIFT) ++ ++/* MCTP header definitions */ ++#define MCTP_HDR_SRC_EID_OFFSET 14 ++#define MCTP_HDR_TAG_OFFSET 15 ++#define MCTP_HDR_SOM BIT(7) ++#define MCTP_HDR_EOM BIT(6) ++#define MCTP_HDR_SOM_EOM (MCTP_HDR_SOM | MCTP_HDR_EOM) ++#define MCTP_PAYLOAD_TYPE_OFFSET 0 ++#define MCTP_HDR_TYPE_CONTROL 0 ++#define MCTP_HDR_TYPE_VDM_PCI 0x7e ++#define MCTP_HDR_TYPE_SPDM 0x5 ++#define MCTP_HDR_TYPE_BASE_LAST MCTP_HDR_TYPE_SPDM ++#define MCTP_PAYLOAD_VENDOR_OFFSET 1 ++#define MCTP_PAYLOAD_VDM_TYPE_OFFSET 3 ++ ++/* MCTP header DW little endian mask definitions */ ++/* 0th DW */ ++#define MCTP_HDR_DW_LE_ROUTING_TYPE GENMASK(26, 24) ++#define MCTP_HDR_DW_LE_PACKET_SIZE GENMASK(9, 0) ++/* 1st DW */ ++#define MCTP_HDR_DW_LE_PADDING_LEN GENMASK(13, 12) ++/* 2nd DW */ ++#define MCTP_HDR_DW_LE_TARGET_ID GENMASK(31, 16) ++/* 3rd DW */ ++#define MCTP_HDR_DW_LE_TAG_OWNER BIT(3) ++#define MCTP_HDR_DW_LE_DEST_EID GENMASK(23, 16) ++ ++#define ASPEED_MCTP_2600 0 ++#define ASPEED_MCTP_2600A3 1 ++ ++#define ASPEED_REVISION_ID0 0x04 ++#define ASPEED_REVISION_ID1 0x14 ++#define ID0_AST2600A0 0x05000303 ++#define ID1_AST2600A0 0x05000303 ++#define ID0_AST2600A1 0x05010303 ++#define ID1_AST2600A1 0x05010303 ++#define ID0_AST2600A2 0x05010303 ++#define ID1_AST2600A2 0x05020303 ++#define ID0_AST2600A3 0x05030303 ++#define ID1_AST2600A3 0x05030303 ++#define ID0_AST2620A1 0x05010203 ++#define ID1_AST2620A1 0x05010203 ++#define ID0_AST2620A2 0x05010203 ++#define ID1_AST2620A2 0x05020203 ++#define ID0_AST2620A3 0x05030203 ++#define ID1_AST2620A3 0x05030203 ++#define ID0_AST2605A2 0x05010103 ++#define ID1_AST2605A2 0x05020103 ++#define ID0_AST2605A3 0x05030103 ++#define ID1_AST2605A3 0x05030103 ++#define ID0_AST2625A3 0x05030403 ++#define ID1_AST2625A3 0x05030403 ++ ++#define ASPEED_G7_SCU_PCIE0_CTRL_OFFSET 0xa60 ++#define ASPEED_G7_SCU_PCIE1_CTRL_OFFSET 0xae0 ++#define ASPEED_G7_SCU_PCIE_CTRL_VDM_EN BIT(1) ++ ++struct aspeed_mctp_match_data { ++ u32 rx_cmd_size; ++ u32 tx_cmd_size; ++ u32 packet_unit_size; ++ bool need_address_mapping; ++ bool vdm_hdr_direct_xfer; ++ bool fifo_auto_surround; ++ bool dma_need_64bits_width; ++ u32 scu_pcie_ctrl_offset; ++}; ++ ++struct aspeed_mctp_rx_cmd { ++ u32 rx_lo; ++ u32 rx_hi; ++}; ++ ++struct aspeed_mctp_tx_cmd { ++ u32 tx_lo; ++ u32 tx_hi; ++}; ++ ++struct aspeed_g7_mctp_tx_cmd { ++ u32 tx_lo; ++ u32 tx_mid; ++ u32 tx_hi; ++ u32 reserved; ++}; ++ ++struct mctp_buffer { ++ void *vaddr; ++ dma_addr_t dma_handle; ++}; ++ ++struct mctp_channel { ++ struct mctp_buffer data; ++ struct mctp_buffer cmd; ++ struct tasklet_struct tasklet; ++ u32 buffer_count; ++ u32 rd_ptr; ++ u32 wr_ptr; ++ bool stopped; 
++}; ++ ++struct aspeed_mctp { ++ struct device *dev; ++ struct miscdevice mctp_miscdev; ++ const struct aspeed_mctp_match_data *match_data; ++ struct regmap *map; ++ struct reset_control *reset; ++ /* ++ * The reset of the dma block in the MCTP-RC is connected to ++ * another reset pin. ++ */ ++ struct reset_control *reset_dma; ++ struct mctp_channel tx; ++ struct mctp_channel rx; ++ struct list_head clients; ++ struct mctp_client *default_client; ++ struct list_head mctp_type_handlers; ++ /* ++ * clients_lock protects list of clients, list of type handlers ++ * and default client ++ */ ++ spinlock_t clients_lock; ++ struct list_head endpoints; ++ size_t endpoints_count; ++ /* ++ * endpoints_lock protects list of endpoints ++ */ ++ struct mutex endpoints_lock; ++ struct { ++ struct regmap *map; ++ struct delayed_work rst_dwork; ++ bool need_uevent; ++ } pcie; ++ struct { ++ bool enable; ++ bool first_loop; ++ int packet_counter; ++ } rx_runaway_wa; ++ bool rx_warmup; ++ u8 eid; ++ struct platform_device *peci_mctp; ++ /* Use the flag to identify RC or EP */ ++ bool rc_f; ++ /* Use the flag to identify the support of MCTP interrupt */ ++ bool miss_mctp_int; ++ /* Rx hardware buffer size */ ++ u32 rx_packet_count; ++ /* Rx pointer ring size */ ++ u32 rx_ring_count; ++ /* Tx pointer ring size */ ++ u32 tx_ring_count; ++ /* Delayed work for periodic detection of Rx packets */ ++ struct delayed_work rx_det_dwork; ++ u32 rx_det_period_us; ++#ifdef CONFIG_MCTP_TRANSPORT_PCIE_VDM ++ struct net_device *ndev; ++#endif ++}; ++ ++struct mctp_client { ++ struct kref ref; ++ struct aspeed_mctp *priv; ++ struct ptr_ring tx_queue; ++ struct ptr_ring rx_queue; ++ struct list_head link; ++ wait_queue_head_t wait_queue; ++}; ++ ++struct mctp_type_handler { ++ u8 mctp_type; ++ u16 pci_vendor_id; ++ u16 vdm_type; ++ u16 vdm_mask; ++ struct mctp_client *client; ++ struct list_head link; ++}; ++ ++union aspeed_mctp_eid_data_info { ++ struct aspeed_mctp_eid_info eid_info; ++ struct aspeed_mctp_eid_ext_info eid_ext_info; ++}; ++ ++enum mctp_address_type { ++ ASPEED_MCTP_GENERIC_ADDR_FORMAT = 0, ++ ASPEED_MCTP_EXTENDED_ADDR_FORMAT = 1 ++}; ++ ++struct aspeed_mctp_endpoint { ++ union aspeed_mctp_eid_data_info data; ++ struct list_head link; ++}; ++ ++struct kmem_cache *packet_cache; ++ ++static void data_dump(struct aspeed_mctp *priv, struct mctp_pcie_packet_data *data) ++{ ++ int i; ++ ++ dev_dbg(priv->dev, "Address %zu", (size_t)data); ++ dev_dbg(priv->dev, "VDM header:"); ++ for (i = 0; i < PCIE_VDM_HDR_SIZE_DW; i++) { ++ dev_dbg(priv->dev, "%02x %02x %02x %02x", data->hdr[i] & 0xff, ++ (data->hdr[i] >> 8) & 0xff, (data->hdr[i] >> 16) & 0xff, ++ (data->hdr[i] >> 24) & 0xff); ++ } ++ dev_dbg(priv->dev, "Data payload:"); ++ for (i = 0; i < PCIE_VDM_DATA_SIZE_DW; i++) { ++ dev_dbg(priv->dev, "%02x %02x %02x %02x", ++ data->payload[i] & 0xff, (data->payload[i] >> 8) & 0xff, ++ (data->payload[i] >> 16) & 0xff, ++ (data->payload[i] >> 24) & 0xff); ++ } ++} ++ ++void *aspeed_mctp_packet_alloc(gfp_t flags) ++{ ++ return kmem_cache_alloc(packet_cache, flags); ++} ++EXPORT_SYMBOL_GPL(aspeed_mctp_packet_alloc); ++ ++void aspeed_mctp_packet_free(void *packet) ++{ ++ kmem_cache_free(packet_cache, packet); ++} ++EXPORT_SYMBOL_GPL(aspeed_mctp_packet_free); ++ ++static int _get_bdf(struct aspeed_mctp *priv) ++{ ++ u32 reg; ++ u16 bdf, devfn; ++ ++ if (priv->match_data->dma_need_64bits_width) { ++ regmap_read(priv->pcie.map, ASPEED_G7_PCIE_LOCATE, &reg); ++ if (!(reg & PCIE_LOCATE_IO)) { ++ regmap_read(priv->pcie.map, 
ASPEED_G7_PCIE_LINK, &reg); ++ if (!(reg & PCIE_G7_LINK_STS)) ++ return -ENETDOWN; ++ regmap_read(priv->map, ASPEED_G7_MCTP_PCIE_BDF, &reg); ++ bdf = PCI_DEVID(PCI_BUS_NUM(reg), reg & 0xff); ++ } else { ++ regmap_read(priv->pcie.map, ASPEED_G7_IO_PCIE_LINK, ++ &reg); ++ if (!(reg & PCIE_G7_IO_LINK_STS)) ++ return -ENETDOWN; ++ regmap_read(priv->map, ASPEED_G7_MCTP_PCIE_BDF, &reg); ++ bdf = PCI_DEVID(PCI_BUS_NUM(reg), reg & 0xff); ++ } ++ } else { ++ regmap_read(priv->pcie.map, ASPEED_PCIE_LINK, &reg); ++ if (!(reg & PCIE_LINK_STS)) ++ return -ENETDOWN; ++ regmap_read(priv->pcie.map, ASPEED_PCIE_MISC_STS_1, &reg); ++ ++ reg = reg & (PCI_BUS_NUM_MASK | PCI_DEV_NUM_MASK); ++ /* only support function 0 */ ++ devfn = GET_PCI_DEV_NUM(reg) << 3; ++ bdf = PCI_DEVID(GET_PCI_BUS_NUM(reg), devfn); ++ } ++ ++ return bdf; ++} ++ ++static uint32_t chip_version(struct device *dev) ++{ ++ struct regmap *scu; ++ u32 revid0, revid1; ++ ++ scu = syscon_regmap_lookup_by_phandle(dev->of_node, "aspeed,scu"); ++ if (IS_ERR(scu)) { ++ dev_err(dev, "failed to find 2600 SCU regmap\n"); ++ return PTR_ERR(scu); ++ } ++ regmap_read(scu, ASPEED_REVISION_ID0, &revid0); ++ regmap_read(scu, ASPEED_REVISION_ID1, &revid1); ++ if (revid0 == ID0_AST2600A3 && revid1 == ID1_AST2600A3) { ++ /* AST2600-A3 */ ++ return ASPEED_MCTP_2600A3; ++ } else if (revid0 == ID0_AST2620A3 && revid1 == ID1_AST2620A3) { ++ /* AST2620-A3 */ ++ return ASPEED_MCTP_2600A3; ++ } else if (revid0 == ID0_AST2605A3 && revid1 == ID1_AST2605A3) { ++ /* AST2605-A3 */ ++ return ASPEED_MCTP_2600A3; ++ } else if (revid0 == ID0_AST2625A3 && revid1 == ID1_AST2625A3) { ++ /* AST2605-A3 */ ++ return ASPEED_MCTP_2600A3; ++ } ++ return ASPEED_MCTP_2600; ++} ++ ++static int pcie_vdm_enable(struct device *dev) ++{ ++ int ret = 0; ++ struct regmap *scu; ++ const struct aspeed_mctp_match_data *match_data = ++ of_device_get_match_data(dev); ++ ++ scu = syscon_regmap_lookup_by_phandle(dev->of_node, "aspeed,scu"); ++ if (IS_ERR(scu)) { ++ dev_err(dev, "failed to find SCU regmap\n"); ++ return PTR_ERR(scu); ++ } ++ ret = regmap_update_bits(scu, match_data->scu_pcie_ctrl_offset, ++ ASPEED_G7_SCU_PCIE_CTRL_VDM_EN, ++ ASPEED_G7_SCU_PCIE_CTRL_VDM_EN); ++ return ret; ++} ++ ++/* ++ * HW produces and expects VDM header in little endian and payload in network order. ++ * To allow userspace to use network order for the whole packet, PCIe VDM header needs ++ * to be swapped. ++ */ ++static void aspeed_mctp_swap_pcie_vdm_hdr(struct mctp_pcie_packet_data *data) ++{ ++ int i; ++ ++ for (i = 0; i < PCIE_VDM_HDR_SIZE_DW; i++) ++ data->hdr[i] = swab32(data->hdr[i]); ++} ++ ++static void aspeed_mctp_rx_trigger(struct mctp_channel *rx) ++{ ++ struct aspeed_mctp *priv = container_of(rx, typeof(*priv), rx); ++ u32 reg; ++ ++ /* ++ * Even though rx_buf_addr doesn't change, if we don't do the write ++ * here, the HW doesn't trigger RX. We're also clearing the ++ * RX_CMD_READY bit, otherwise we're observing a rare case where ++ * trigger isn't registered by the HW, and we're ending up with stuck ++ * HW (not reacting to wr_ptr writes). ++ * Also, note that we're writing 0 as wr_ptr. If we're writing other ++ * value, the HW behaves in a bizarre way that's hard to explain... 
++ */ ++ regmap_update_bits(priv->map, ASPEED_MCTP_CTRL, RX_CMD_READY, 0); ++ if (priv->match_data->fifo_auto_surround) { ++ regmap_write(priv->map, ASPEED_MCTP_RX_BUF_ADDR, ++ rx->cmd.dma_handle); ++ if (priv->match_data->dma_need_64bits_width) ++ regmap_write(priv->map, ASPEED_MCTP_RX_BUF_HI_ADDR, ++ upper_32_bits(rx->cmd.dma_handle)); ++ } else { ++ regmap_read(priv->map, ASPEED_MCTP_RX_BUF_ADDR, &reg); ++ if (!reg) { ++ regmap_write(priv->map, ASPEED_MCTP_RX_BUF_ADDR, ++ rx->cmd.dma_handle); ++ } else if (reg == (rx->cmd.dma_handle & GENMASK(28, 3))) { ++ dev_info(priv->dev, ++ "Already initialized - skipping rx dma set\n"); ++ } else { ++ dev_err(priv->dev, ++ "The memory of rx dma can't be changed after the controller is activated\n"); ++ return; ++ } ++ } ++ regmap_write(priv->map, ASPEED_MCTP_RX_BUF_WR_PTR, 0); ++ ++ /* After re-enabling RX we need to restart WA logic */ ++ if (priv->rx_runaway_wa.enable) ++ priv->rx.buffer_count = priv->rx_packet_count; ++ /* ++ * When Rx warmup MCTP controller may store first packet into the 0th to the ++ * 3rd cmd. In ast2600 A3, If the packet isn't stored in the 0th cmd we need ++ * to change the rx buffer size to avoid rx runaway in first loop. In ast2600 ++ * A1/A2, after first loop hardware is guaranteed to use (RX_PACKET_COUNT - 4) ++ * buffers. ++ */ ++ priv->rx_warmup = true; ++ priv->rx_runaway_wa.first_loop = true; ++ priv->rx_runaway_wa.packet_counter = 0; ++ ++ regmap_update_bits(priv->map, ASPEED_MCTP_CTRL, RX_CMD_READY, ++ RX_CMD_READY); ++} ++ ++static void aspeed_mctp_tx_trigger(struct mctp_channel *tx, bool notify) ++{ ++ struct aspeed_mctp *priv = container_of(tx, typeof(*priv), tx); ++ u32 ctrl_val; ++ int ret; ++ ++ if (notify) { ++ if (priv->match_data->dma_need_64bits_width) { ++ struct aspeed_g7_mctp_tx_cmd *last_cmd; ++ ++ last_cmd = (struct aspeed_g7_mctp_tx_cmd *)tx->cmd.vaddr + ++ (tx->wr_ptr - 1) % TX_PACKET_COUNT; ++ last_cmd->tx_lo |= TX_INTERRUPT_AFTER_CMD; ++ } else { ++ struct aspeed_mctp_tx_cmd *last_cmd; ++ ++ last_cmd = (struct aspeed_mctp_tx_cmd *)tx->cmd.vaddr + ++ (tx->wr_ptr - 1) % TX_PACKET_COUNT; ++ last_cmd->tx_lo |= TX_INTERRUPT_AFTER_CMD; ++ } ++ } ++ if (priv->match_data->fifo_auto_surround) ++ regmap_write(priv->map, ASPEED_MCTP_TX_BUF_WR_PTR, tx->wr_ptr); ++ regmap_update_bits(priv->map, ASPEED_MCTP_CTRL, TX_CMD_TRIGGER, ++ TX_CMD_TRIGGER); ++ ret = regmap_read_poll_timeout_atomic(priv->map, ASPEED_MCTP_CTRL, ++ ctrl_val, ++ !(ctrl_val & TX_CMD_TRIGGER), 0, ++ 1000000); ++ if (ret) { ++ u32 rd_ptr, wr_ptr; ++ ++ regmap_write(priv->map, ASPEED_MCTP_TX_BUF_RD_PTR, UPDATE_RX_RD_PTR); ++ regmap_read(priv->map, ASPEED_MCTP_TX_BUF_RD_PTR, &rd_ptr); ++ rd_ptr &= RX_BUF_RD_PTR_MASK; ++ regmap_read(priv->map, ASPEED_MCTP_TX_BUF_WR_PTR, &wr_ptr); ++ wr_ptr &= TX_BUF_RD_PTR_MASK; ++ dev_warn(priv->dev, ++ "Wait tx completed timeout rd_ptr = %x, wr_ptr = %x\n", ++ rd_ptr, wr_ptr); ++ regmap_update_bits(priv->map, ASPEED_MCTP_CTRL, TX_CMD_TRIGGER, ++ 0); ++ } ++} ++ ++static void aspeed_mctp_tx_cmd_prep(u32 *tx_hdr, struct aspeed_mctp_tx_cmd *tx_cmd) ++{ ++ u32 packet_size, target_id; ++ u8 dest_eid, padding_len, routing_type, tag_owner; ++ ++ packet_size = FIELD_GET(MCTP_HDR_DW_LE_PACKET_SIZE, tx_hdr[0]); ++ routing_type = FIELD_GET(MCTP_HDR_DW_LE_ROUTING_TYPE, tx_hdr[0]); ++ routing_type = routing_type ? 
routing_type - 1 : 0; ++ padding_len = FIELD_GET(MCTP_HDR_DW_LE_PADDING_LEN, tx_hdr[1]); ++ target_id = FIELD_GET(MCTP_HDR_DW_LE_TARGET_ID, tx_hdr[2]); ++ tag_owner = FIELD_GET(MCTP_HDR_DW_LE_TAG_OWNER, tx_hdr[3]); ++ dest_eid = FIELD_GET(MCTP_HDR_DW_LE_DEST_EID, tx_hdr[3]); ++ ++ tx_cmd->tx_hi = FIELD_PREP(TX_PACKET_DEST_EID, dest_eid); ++ tx_cmd->tx_lo = FIELD_PREP(TX_PACKET_TARGET_ID, target_id) | ++ TX_INTERRUPT_AFTER_CMD | ++ FIELD_PREP(TX_PACKET_ROUTING_TYPE, routing_type) | ++ FIELD_PREP(TX_PACKET_TAG_OWNER, tag_owner) | ++ TX_PACKET_SIZE_2500(packet_size) | ++ FIELD_PREP(TX_PACKET_PADDING_LEN, padding_len); ++} ++ ++static void aspeed_mctp_emit_tx_cmd(struct mctp_channel *tx, ++ struct mctp_pcie_packet *packet) ++{ ++ struct aspeed_mctp *priv = container_of(tx, typeof(*priv), tx); ++ struct aspeed_mctp_tx_cmd *tx_cmd = ++ (struct aspeed_mctp_tx_cmd *)tx->cmd.vaddr + tx->wr_ptr; ++ struct aspeed_g7_mctp_tx_cmd *tx_cmd_g7 = ++ (struct aspeed_g7_mctp_tx_cmd *)tx->cmd.vaddr + tx->wr_ptr; ++ u32 packet_sz_dw = packet->size / sizeof(u32) - ++ sizeof(packet->data.hdr) / sizeof(u32); ++ u32 offset; ++ ++ data_dump(priv, &packet->data); ++ aspeed_mctp_swap_pcie_vdm_hdr(&packet->data); ++ ++ if (priv->match_data->vdm_hdr_direct_xfer) { ++ offset = tx->wr_ptr * sizeof(packet->data); ++ memcpy((u8 *)tx->data.vaddr + offset, &packet->data, ++ sizeof(packet->data)); ++ if (priv->match_data->dma_need_64bits_width) { ++ tx_cmd_g7->tx_lo = TX_PACKET_SIZE(packet_sz_dw); ++ tx_cmd_g7->tx_mid = TX_RESERVED_1; ++ tx_cmd_g7->tx_mid |= ((tx->data.dma_handle + offset) & ++ GENMASK(31, 4)); ++ tx_cmd_g7->tx_hi = upper_32_bits((tx->data.dma_handle + offset)); ++ } else { ++ tx_cmd->tx_lo = TX_PACKET_SIZE(packet_sz_dw); ++ tx_cmd->tx_hi = TX_RESERVED_1; ++ tx_cmd->tx_hi |= TX_DATA_ADDR(tx->data.dma_handle + offset); ++ } ++ } else { ++ offset = tx->wr_ptr * sizeof(struct mctp_pcie_packet_data_2500); ++ memcpy((u8 *)tx->data.vaddr + offset, packet->data.payload, ++ sizeof(packet->data.payload)); ++ aspeed_mctp_tx_cmd_prep(packet->data.hdr, tx_cmd); ++ tx_cmd->tx_hi |= TX_DATA_ADDR_2500(tx->data.dma_handle + offset); ++ if (tx->wr_ptr == TX_PACKET_COUNT - 1) ++ tx_cmd->tx_hi |= TX_LAST_CMD; ++ } ++ dev_dbg(priv->dev, "tx->wr_prt: %d, tx_cmd: hi:%08x lo:%08x\n", ++ tx->wr_ptr, tx_cmd->tx_hi, tx_cmd->tx_lo); ++ ++ tx->wr_ptr = (tx->wr_ptr + 1) % TX_PACKET_COUNT; ++} ++ ++static struct mctp_client *aspeed_mctp_client_alloc(struct aspeed_mctp *priv) ++{ ++ struct mctp_client *client; ++ ++ client = kzalloc(sizeof(*client), GFP_KERNEL); ++ if (!client) ++ goto out; ++ ++ kref_init(&client->ref); ++ client->priv = priv; ++ ptr_ring_init(&client->tx_queue, priv->tx_ring_count, GFP_KERNEL); ++ ptr_ring_init(&client->rx_queue, priv->rx_ring_count, GFP_ATOMIC); ++ ++out: ++ return client; ++} ++ ++static void aspeed_mctp_client_free(struct kref *ref) ++{ ++ struct mctp_client *client = container_of(ref, typeof(*client), ref); ++ ++ ptr_ring_cleanup(&client->rx_queue, &aspeed_mctp_packet_free); ++ ptr_ring_cleanup(&client->tx_queue, &aspeed_mctp_packet_free); ++ ++ kfree(client); ++} ++ ++static void aspeed_mctp_client_get(struct mctp_client *client) ++{ ++ lockdep_assert_held(&client->priv->clients_lock); ++ ++ kref_get(&client->ref); ++} ++ ++static void aspeed_mctp_client_put(struct mctp_client *client) ++{ ++ kref_put(&client->ref, &aspeed_mctp_client_free); ++} ++ ++static struct mctp_client * ++aspeed_mctp_find_handler(struct aspeed_mctp *priv, ++ struct mctp_pcie_packet *packet) ++{ ++ struct mctp_type_handler 
*handler; ++ u8 *payload = (u8 *)packet->data.payload; ++ struct mctp_client *client = NULL; ++ u8 mctp_type; ++ u16 vendor = 0; ++ u16 vdm_type = 0; ++ ++ lockdep_assert_held(&priv->clients_lock); ++ ++ mctp_type = payload[MCTP_PAYLOAD_TYPE_OFFSET]; ++ if (mctp_type == MCTP_HDR_TYPE_VDM_PCI) { ++ vendor = *((u16 *)&payload[MCTP_PAYLOAD_VENDOR_OFFSET]); ++ vdm_type = *((u16 *)&payload[MCTP_PAYLOAD_VDM_TYPE_OFFSET]); ++ } ++ ++ list_for_each_entry(handler, &priv->mctp_type_handlers, link) { ++ if (handler->mctp_type == mctp_type && ++ handler->pci_vendor_id == vendor && ++ handler->vdm_type == (vdm_type & handler->vdm_mask)) { ++ dev_dbg(priv->dev, "Found client for type %x vdm %x\n", ++ mctp_type, handler->vdm_type); ++ client = handler->client; ++ break; ++ } ++ } ++ return client; ++} ++ ++static void aspeed_mctp_dispatch_packet(struct aspeed_mctp *priv, ++ struct mctp_pcie_packet *packet) ++{ ++ struct mctp_client *client; ++ int ret; ++ ++ spin_lock(&priv->clients_lock); ++ ++ client = aspeed_mctp_find_handler(priv, packet); ++ ++ if (!client) ++ client = priv->default_client; ++ ++ if (client) ++ aspeed_mctp_client_get(client); ++ ++ spin_unlock(&priv->clients_lock); ++ ++ if (client) { ++ ret = ptr_ring_produce(&client->rx_queue, packet); ++ if (ret) { ++ /* ++ * This can happen if client process does not ++ * consume packets fast enough ++ */ ++ dev_dbg(priv->dev, "Failed to store packet in client RX queue\n"); ++ aspeed_mctp_packet_free(packet); ++ } else { ++ wake_up_all(&client->wait_queue); ++ } ++#ifdef CONFIG_MCTP_TRANSPORT_PCIE_VDM ++ mctp_pcie_vdm_receive_packet(priv->ndev); ++#endif ++ aspeed_mctp_client_put(client); ++ } else { ++ dev_dbg(priv->dev, "Failed to dispatch RX packet\n"); ++ aspeed_mctp_packet_free(packet); ++ } ++} ++ ++static void aspeed_mctp_tx_tasklet(unsigned long data) ++{ ++ struct mctp_channel *tx = (struct mctp_channel *)data; ++ struct aspeed_mctp *priv = container_of(tx, typeof(*priv), tx); ++ struct mctp_client *client; ++ bool trigger = false; ++ bool full = false; ++ u32 rd_ptr; ++ ++ if (priv->match_data->fifo_auto_surround) { ++ regmap_write(priv->map, ASPEED_MCTP_TX_BUF_RD_PTR, UPDATE_RX_RD_PTR); ++ regmap_read(priv->map, ASPEED_MCTP_TX_BUF_RD_PTR, &rd_ptr); ++ rd_ptr &= TX_BUF_RD_PTR_MASK; ++ } else { ++ rd_ptr = tx->rd_ptr; ++ } ++ ++ spin_lock(&priv->clients_lock); ++ ++ list_for_each_entry(client, &priv->clients, link) { ++ while (!(full = (tx->wr_ptr + 1) % TX_PACKET_COUNT == rd_ptr)) { ++ struct mctp_pcie_packet *packet; ++ ++ packet = ptr_ring_consume(&client->tx_queue); ++ if (!packet) ++ break; ++ ++ aspeed_mctp_emit_tx_cmd(tx, packet); ++ aspeed_mctp_packet_free(packet); ++ trigger = true; ++ } ++ } ++ ++ spin_unlock(&priv->clients_lock); ++ ++ if (trigger) ++ aspeed_mctp_tx_trigger(tx, full); ++} ++ ++static void aspeed_mctp_rx_hdr_prep(struct aspeed_mctp *priv, u8 *hdr, u32 rx_lo) ++{ ++ u16 bdf; ++ u8 routing_type; ++ ++ /* ++ * MCTP controller will map the routing type to reduce one bit ++ * 0 (Route to RC) -> 0, ++ * 2 (Route by ID) -> 1, ++ * 3 (Broadcast from RC) -> 2 ++ */ ++ routing_type = FIELD_GET(RX_PACKET_ROUTING_TYPE, rx_lo); ++ routing_type = routing_type ? 
routing_type + 1 : 0; ++ bdf = _get_bdf(priv); ++ /* Length[7:0] */ ++ hdr[0] = FIELD_GET(RX_PACKET_SIZE, rx_lo); ++ /* TD:EP:ATTR[1:0]:R or AT[1:0]:Length[9:8] */ ++ hdr[1] = 0; ++ /* R or T9:TC[2:0]:R[3:0] */ ++ hdr[2] = 0; ++ /* R or Fmt[2]:Fmt[1:0]=b'11:Type[4:3]=b'10:Type[2:0] */ ++ hdr[3] = 0x70 | routing_type; ++ /* VDM message code = 0x7f */ ++ hdr[4] = 0x7f; ++ /* R[1:0]:Pad len[1:0]:MCTP VDM Code[3:0] */ ++ hdr[5] = FIELD_GET(RX_PACKET_PADDING_LEN, rx_lo) << 4; ++ /* TODO: PCI Requester ID: HW didn't get this information */ ++ hdr[6] = 0; ++ hdr[7] = 5; ++ /* Vendor ID: 0x1AB4 */ ++ hdr[8] = 0xb4; ++ hdr[9] = 0x1a; ++ /* PCI Target ID */ ++ hdr[10] = bdf & 0xff; ++ hdr[11] = bdf >> 8 & 0xff; ++ /* SOM:EOM:Pkt Seq#[1:0]:TO:Msg Tag[2:0]*/ ++ hdr[12] = FIELD_GET(RX_PACKET_SOM, rx_lo) << 7 | ++ FIELD_GET(RX_PACKET_EOM, rx_lo) << 6 | ++ FIELD_GET(RX_PACKET_SEQ_NUMBER, rx_lo) << 4 | ++ FIELD_GET(RX_PACKET_TAG_OWNER, rx_lo) << 3 | ++ FIELD_GET(RX_PACKET_MSG_TAG, rx_lo); ++ /* Source Endpoint ID */ ++ hdr[13] = FIELD_GET(RX_PACKET_SRC_EID, rx_lo); ++ /* Destination Endpoint ID: HW didn't get this information*/ ++ hdr[14] = priv->eid; ++ /* TODO: R[3:0]: header version[3:0] */ ++ hdr[15] = 1; ++} ++ ++static void aspeed_mctp_rx_tasklet(unsigned long data) ++{ ++ struct mctp_channel *rx = (struct mctp_channel *)data; ++ struct aspeed_mctp *priv = container_of(rx, typeof(*priv), rx); ++ struct mctp_pcie_packet *rx_packet; ++ struct aspeed_mctp_rx_cmd *rx_cmd; ++ u32 hw_read_ptr; ++ u32 *hdr, *payload; ++ bool rx_full; ++ ++ /* initialized as false */ ++ rx_full = false; ++ ++ if (priv->match_data->vdm_hdr_direct_xfer && priv->match_data->fifo_auto_surround) { ++ struct mctp_pcie_packet_data *rx_buf; ++ u32 residual_cmds = 0; ++ ++ /* Trigger HW read pointer update, must be done before RX loop */ ++ regmap_write(priv->map, ASPEED_MCTP_RX_BUF_RD_PTR, UPDATE_RX_RD_PTR); ++ ++ /* ++ * rx->stopped indicates if rx ring is full or not. ++ * Use rx_full to note ring status before consuming packet. ++ */ ++ rx_full = rx->stopped; ++ ++ /* ++ * XXX: Using rd_ptr obtained from HW is unreliable so we need to ++ * maintain the state of buffer on our own by peeking into the buffer ++ * and checking where the packet was written. ++ */ ++ rx_buf = (struct mctp_pcie_packet_data *)rx->data.vaddr; ++ hdr = (u32 *)&rx_buf[rx->wr_ptr]; ++ if (!*hdr && priv->rx_warmup) { ++ u32 tmp_wr_ptr = rx->wr_ptr; ++ ++ /* ++ * HACK: Right after start the RX hardware can put received ++ * packet into an unexpected offset - in order to locate ++ * received packet driver has to scan all RX data buffers. ++ */ ++ do { ++ tmp_wr_ptr = (tmp_wr_ptr + 1) % priv->rx_packet_count; ++ ++ hdr = (u32 *)&rx_buf[tmp_wr_ptr]; ++ } while (!*hdr && tmp_wr_ptr != rx->wr_ptr); ++ ++ if (tmp_wr_ptr != rx->wr_ptr) { ++ dev_warn(priv->dev, ++ "Runaway RX packet found %d -> %d\n", ++ rx->wr_ptr, tmp_wr_ptr); ++ residual_cmds = abs(tmp_wr_ptr - rx->wr_ptr); ++ rx->wr_ptr = tmp_wr_ptr; ++ if (!priv->rx_runaway_wa.enable && ++ priv->rx_warmup) ++ regmap_write(priv->map, ASPEED_MCTP_RX_BUF_SIZE, ++ rx->buffer_count - residual_cmds); ++ priv->rx_warmup = false; ++ } ++ } else { ++ priv->rx_warmup = false; ++ } ++ ++ if (priv->rx_runaway_wa.packet_counter > priv->rx_packet_count && ++ priv->rx_runaway_wa.first_loop) { ++ if (priv->rx_runaway_wa.enable) ++ /* ++ * Once we receive RX_PACKET_COUNT packets, hardware is ++ * guaranteed to use (RX_PACKET_COUNT - 4) buffers. Decrease ++ * buffer count by 4, then we can turn off scanning of RX ++ * buffers. 
RX buffer scanning should be enabled every time ++ * RX hardware is started. ++ * This is just a performance optimization - we could keep ++ * scanning RX buffers forever, but under heavy traffic it is ++ * fairly common that rx_tasklet is executed while RX buffer ++ * ring is empty. ++ */ ++ rx->buffer_count = priv->rx_packet_count - 4; ++ else ++ /* ++ * Once we receive RX_PACKET_COUNT packets, we need to restore the ++ * RX buffer size to 4 byte aligned value to avoid rx runaway. ++ */ ++ regmap_write(priv->map, ASPEED_MCTP_RX_BUF_SIZE, ++ rx->buffer_count); ++ priv->rx_runaway_wa.first_loop = false; ++ } ++ ++ while (*hdr != 0) { ++ if (FIELD_GET(MCTP_HDR_DW_LE_PACKET_SIZE, hdr[0]) * 4 > ++ ASPEED_MCTP_MTU) ++ dev_warn(priv->dev, ++ "Rx length %ld > MTU size %d\n", ++ FIELD_GET(MCTP_HDR_DW_LE_PACKET_SIZE, ++ hdr[0]) * ++ 4, ++ ASPEED_MCTP_MTU); ++ rx_packet = aspeed_mctp_packet_alloc(GFP_ATOMIC); ++ if (rx_packet) { ++ memcpy(&rx_packet->data, hdr, sizeof(rx_packet->data)); ++ aspeed_mctp_swap_pcie_vdm_hdr(&rx_packet->data); ++ ++ aspeed_mctp_dispatch_packet(priv, rx_packet); ++ } else { ++ dev_dbg(priv->dev, "Failed to allocate RX packet\n"); ++ } ++ data_dump(priv, &rx_packet->data); ++ *hdr = 0; ++ rx->wr_ptr = (rx->wr_ptr + 1) % rx->buffer_count; ++ hdr = (u32 *)&rx_buf[rx->wr_ptr]; ++ ++ priv->rx_runaway_wa.packet_counter++; ++ } ++ ++ /* ++ * Update HW write pointer, this can be done only after driver consumes ++ * packets from RX ring. ++ */ ++ regmap_read(priv->map, ASPEED_MCTP_RX_BUF_RD_PTR, &hw_read_ptr); ++ hw_read_ptr &= RX_BUF_RD_PTR_MASK; ++ regmap_write(priv->map, ASPEED_MCTP_RX_BUF_WR_PTR, (hw_read_ptr)); ++ ++ dev_dbg(priv->dev, "RX hw ptr %02d, sw ptr %2d\n", ++ hw_read_ptr, rx->wr_ptr); ++ } else { ++ struct mctp_pcie_packet_data_2500 *rx_buf; ++ ++ rx_buf = (struct mctp_pcie_packet_data_2500 *)rx->data.vaddr; ++ payload = (u32 *)&rx_buf[rx->wr_ptr]; ++ rx_cmd = (struct aspeed_mctp_rx_cmd *)rx->cmd.vaddr; ++ hdr = (u32 *)&((rx_cmd + rx->wr_ptr)->rx_lo); ++ ++ if (!*hdr) { ++ u32 tmp_wr_ptr = rx->wr_ptr; ++ ++ /* ++ * HACK: Right after start the RX hardware can put received ++ * packet into an unexpected offset - in order to locate ++ * received packet driver has to scan all RX data buffers. ++ */ ++ do { ++ tmp_wr_ptr = (tmp_wr_ptr + 1) % rx->buffer_count; ++ ++ hdr = (u32 *)&((rx_cmd + tmp_wr_ptr)->rx_lo); ++ } while (!*hdr && tmp_wr_ptr != rx->wr_ptr); ++ ++ if (tmp_wr_ptr != rx->wr_ptr) { ++ dev_warn(priv->dev, ++ "Runaway RX packet found %d -> %d\n", ++ rx->wr_ptr, tmp_wr_ptr); ++ rx->wr_ptr = tmp_wr_ptr; ++ } ++ } ++ ++ while (*hdr != 0) { ++ rx_packet = aspeed_mctp_packet_alloc(GFP_ATOMIC); ++ if (rx_packet) { ++ memcpy(rx_packet->data.payload, payload, ++ sizeof(rx_packet->data.payload)); ++ ++ aspeed_mctp_rx_hdr_prep(priv, (u8 *)rx_packet->data.hdr, *hdr); ++ ++ aspeed_mctp_swap_pcie_vdm_hdr(&rx_packet->data); ++ ++ aspeed_mctp_dispatch_packet(priv, rx_packet); ++ } else { ++ dev_dbg(priv->dev, "Failed to allocate RX packet\n"); ++ } ++ dev_dbg(priv->dev, ++ "rx->wr_ptr = %d, rx_cmd->rx_lo = %08x", ++ rx->wr_ptr, *hdr); ++ data_dump(priv, &rx_packet->data); ++ *hdr = 0; ++ rx->wr_ptr = (rx->wr_ptr + 1) % rx->buffer_count; ++ payload = (u32 *)&rx_buf[rx->wr_ptr]; ++ hdr = (u32 *)&((rx_cmd + rx->wr_ptr)->rx_lo); ++ } ++ } ++ ++ /* Kick RX if it was stopped due to ring full condition */ ++ if (rx->stopped) { ++ if (!rx_full) { ++ /* ++ * RX ring may still be full in here as the HW keeps producing when Tasklet consumes the packets. 
++ * Use rx_full to detect if RX ring is already full before or after Tasklet consumption. ++ * Schedule another tasklet here to consume RX ring before restarting reception if ring is full after the while loop, ++ * in case that RX_CMD_NO_MORE_INT interrupts tasklet after tasklet consumes packets. ++ * Use flag cause we cannot control if ASPEED_MCTP_RX_BUF_WR_PTR can be updated before ring full occurs. ++ * Example of problematic scenario: ++ * 1. Tasklet executing, found *hdr==0 at wr_ptr=14, break the while loop and going forward. ++ * 2. After leaving the loop, Tasklet spend time doing some time-consuming stuffs like printing log. ++ * 3. HW keep receiving during step2, and triggered RX_CMD_NO_MORE_INT to set rx->stopped to true in the IRQ handler. ++ * 4. CPU returns to Tasklet, after step2 the tasklet sees rx->stopped == true, therefore kick RX_READY to restart RX. ++ * 5. Issue reproduced, RX restarted without stored packets consumed, and get overwritten later. ++ */ ++ tasklet_hi_schedule(&priv->rx.tasklet); ++ } else { ++ rx->stopped = false; ++ regmap_update_bits(priv->map, ASPEED_MCTP_CTRL, RX_CMD_READY, ++ RX_CMD_READY); ++ } ++ } ++} ++ ++static void aspeed_mctp_rx_chan_init(struct mctp_channel *rx) ++{ ++ struct aspeed_mctp *priv = container_of(rx, typeof(*priv), rx); ++ u32 *rx_cmd = (u32 *)rx->cmd.vaddr; ++ struct aspeed_mctp_rx_cmd *rx_cmd_64 = ++ (struct aspeed_mctp_rx_cmd *)rx->cmd.vaddr; ++ u32 data_size = priv->match_data->packet_unit_size; ++ u32 hw_rx_count = priv->rx_packet_count; ++ struct mctp_pcie_packet_data *rx_buf = (struct mctp_pcie_packet_data *)rx->data.vaddr; ++ int i; ++ ++ if (priv->match_data->vdm_hdr_direct_xfer) { ++ if (priv->match_data->dma_need_64bits_width) { ++ for (i = 0; i < priv->rx_packet_count; i++) { ++ rx_cmd_64->rx_hi = ++ upper_32_bits((rx->data.dma_handle + data_size * i)); ++ rx_cmd_64->rx_lo = ++ (rx->data.dma_handle + data_size * i) & ++ GENMASK(31, 4); ++ rx_cmd_64->rx_lo |= RX_INTERRUPT_AFTER_CMD; ++ rx_cmd_64++; ++ } ++ } else { ++ for (i = 0; i < priv->rx_packet_count; i++) { ++ *rx_cmd = RX_DATA_ADDR(rx->data.dma_handle + data_size * i); ++ *rx_cmd |= RX_INTERRUPT_AFTER_CMD; ++ rx_cmd++; ++ } ++ } ++ } else { ++ for (i = 0; i < priv->rx_packet_count; i++) { ++ rx_cmd_64->rx_hi = RX_DATA_ADDR_2500(rx->data.dma_handle + data_size * i); ++ rx_cmd_64->rx_lo = 0; ++ if (i == priv->rx_packet_count - 1) ++ rx_cmd_64->rx_hi |= RX_LAST_CMD; ++ rx_cmd_64++; ++ } ++ } ++ /* Clear the header of rx data */ ++ for (i = 0; i < priv->rx_packet_count; i++) ++ *(u32 *)&rx_buf[i] = 0; ++ rx->wr_ptr = 0; ++ rx->buffer_count = priv->rx_packet_count; ++ if (priv->match_data->fifo_auto_surround) { ++ /* ++ * TODO: Once read pointer runaway bug is fixed in some future AST2x00 ++ * stepping then add chip revision detection and turn on this ++ * workaround only when needed ++ */ ++ if (priv->match_data->dma_need_64bits_width) ++ priv->rx_runaway_wa.enable = false; ++ else ++ priv->rx_runaway_wa.enable = ++ (chip_version(priv->dev) == ASPEED_MCTP_2600) ? 
++ true : ++ false; ++ ++ /* ++ * Hardware does not wrap around ASPEED_MCTP_RX_BUF_SIZE ++ * correctly - we have to set number of buffers to n/4 -1 ++ */ ++ if (priv->rx_runaway_wa.enable) ++ hw_rx_count = (priv->rx_packet_count / 4 - 1); ++ ++ regmap_write(priv->map, ASPEED_MCTP_RX_BUF_SIZE, hw_rx_count); ++ } ++} ++ ++static void aspeed_mctp_tx_chan_init(struct mctp_channel *tx) ++{ ++ struct aspeed_mctp *priv = container_of(tx, typeof(*priv), tx); ++ ++ tx->wr_ptr = 0; ++ tx->rd_ptr = 0; ++ regmap_update_bits(priv->map, ASPEED_MCTP_CTRL, TX_CMD_TRIGGER, 0); ++ regmap_write(priv->map, ASPEED_MCTP_TX_BUF_ADDR, tx->cmd.dma_handle); ++ if (priv->match_data->dma_need_64bits_width) ++ regmap_write(priv->map, ASPEED_MCTP_TX_BUF_HI_ADDR, ++ upper_32_bits(tx->cmd.dma_handle)); ++ if (priv->match_data->fifo_auto_surround) { ++ regmap_write(priv->map, ASPEED_MCTP_TX_BUF_SIZE, TX_PACKET_COUNT); ++ regmap_write(priv->map, ASPEED_MCTP_TX_BUF_WR_PTR, 0); ++ } ++} ++ ++struct mctp_client *aspeed_mctp_create_client(struct aspeed_mctp *priv) ++{ ++ struct mctp_client *client; ++ ++ client = aspeed_mctp_client_alloc(priv); ++ if (!client) ++ return NULL; ++ ++ init_waitqueue_head(&client->wait_queue); ++ ++ spin_lock_bh(&priv->clients_lock); ++ list_add_tail(&client->link, &priv->clients); ++ spin_unlock_bh(&priv->clients_lock); ++ ++ return client; ++} ++EXPORT_SYMBOL_GPL(aspeed_mctp_create_client); ++ ++static int aspeed_mctp_open(struct inode *inode, struct file *file) ++{ ++ struct miscdevice *misc = file->private_data; ++ struct platform_device *pdev = to_platform_device(misc->parent); ++ struct aspeed_mctp *priv = platform_get_drvdata(pdev); ++ struct mctp_client *client; ++ ++ client = aspeed_mctp_create_client(priv); ++ if (!client) ++ return -ENOMEM; ++ ++ file->private_data = client; ++ ++ return 0; ++} ++ ++void aspeed_mctp_delete_client(struct mctp_client *client) ++{ ++ struct aspeed_mctp *priv = client->priv; ++ struct mctp_type_handler *handler, *tmp; ++ ++ spin_lock_bh(&priv->clients_lock); ++ ++ list_del(&client->link); ++ ++ if (priv->default_client == client) ++ priv->default_client = NULL; ++ ++ list_for_each_entry_safe(handler, tmp, &priv->mctp_type_handlers, ++ link) { ++ if (handler->client == client) { ++ list_del(&handler->link); ++ kfree(handler); ++ } ++ } ++ spin_unlock_bh(&priv->clients_lock); ++ ++ /* Disable the tasklet to appease lockdep */ ++ local_bh_disable(); ++ aspeed_mctp_client_put(client); ++ local_bh_enable(); ++} ++EXPORT_SYMBOL_GPL(aspeed_mctp_delete_client); ++ ++static int aspeed_mctp_release(struct inode *inode, struct file *file) ++{ ++ struct mctp_client *client = file->private_data; ++ ++ aspeed_mctp_delete_client(client); ++ ++ return 0; ++} ++ ++#define LEN_MASK_HI GENMASK(9, 8) ++#define LEN_MASK_LO GENMASK(7, 0) ++#define PCI_VDM_HDR_LEN_MASK_LO GENMASK(31, 24) ++#define PCI_VDM_HDR_LEN_MASK_HI GENMASK(17, 16) ++#define PCIE_VDM_HDR_REQUESTER_BDF_MASK GENMASK(31, 16) ++ ++int aspeed_mctp_send_packet(struct mctp_client *client, ++ struct mctp_pcie_packet *packet) ++{ ++ struct aspeed_mctp *priv = client->priv; ++ u32 *hdr_dw = (u32 *)packet->data.hdr; ++ u8 *hdr = (u8 *)packet->data.hdr; ++ u8 *payload = (u8 *)packet->data.payload; ++ u16 packet_data_sz_dw; ++ u16 pci_data_len_dw; ++ int ret; ++ u16 bdf; ++ ++ ret = _get_bdf(priv); ++ if (ret < 0) ++ return ret; ++ bdf = ret; ++ ++ /* ++ * If the data size is different from contents of PCIe VDM header, ++ * aspeed_mctp_tx_cmd will be programmed incorrectly. This may cause ++ * MCTP HW to stop working. 
++ */ ++ pci_data_len_dw = FIELD_PREP(LEN_MASK_LO, FIELD_GET(PCI_VDM_HDR_LEN_MASK_LO, hdr_dw[0])) | ++ FIELD_PREP(LEN_MASK_HI, FIELD_GET(PCI_VDM_HDR_LEN_MASK_HI, hdr_dw[0])); ++ if (pci_data_len_dw == 0) /* According to PCIe Spec, 0 means 1024 DW */ ++ pci_data_len_dw = SZ_1K; ++ ++ packet_data_sz_dw = packet->size / sizeof(u32) - sizeof(packet->data.hdr) / sizeof(u32); ++ if (packet_data_sz_dw != pci_data_len_dw) ++ return -EINVAL; ++ ++ be32p_replace_bits(&hdr_dw[1], bdf, PCIE_VDM_HDR_REQUESTER_BDF_MASK); ++ ++ /* ++ * XXX Don't update EID for MCTP Control messages - old EID may ++ * interfere with MCTP discovery flow. ++ */ ++ if (priv->eid && payload[MCTP_PAYLOAD_TYPE_OFFSET] != MCTP_HDR_TYPE_CONTROL) ++ hdr[MCTP_HDR_SRC_EID_OFFSET] = priv->eid; ++ ++ ret = ptr_ring_produce_bh(&client->tx_queue, packet); ++ if (!ret) ++ tasklet_hi_schedule(&priv->tx.tasklet); ++ ++ return ret; ++} ++EXPORT_SYMBOL_GPL(aspeed_mctp_send_packet); ++ ++struct mctp_pcie_packet *aspeed_mctp_receive_packet(struct mctp_client *client, ++ unsigned long timeout) ++{ ++ struct aspeed_mctp *priv = client->priv; ++ int ret; ++ ++ ret = _get_bdf(priv); ++ if (ret < 0) ++ return ERR_PTR(ret); ++ ++ ret = wait_event_interruptible_timeout(client->wait_queue, ++ __ptr_ring_peek(&client->rx_queue), ++ timeout); ++ if (ret < 0) ++ return ERR_PTR(ret); ++ else if (ret == 0) ++ return ERR_PTR(-ETIME); ++ ++ return ptr_ring_consume_bh(&client->rx_queue); ++} ++EXPORT_SYMBOL_GPL(aspeed_mctp_receive_packet); ++ ++void aspeed_mctp_flush_rx_queue(struct mctp_client *client) ++{ ++ struct mctp_pcie_packet *packet; ++ ++ while ((packet = ptr_ring_consume_bh(&client->rx_queue))) ++ aspeed_mctp_packet_free(packet); ++} ++EXPORT_SYMBOL_GPL(aspeed_mctp_flush_rx_queue); ++ ++static ssize_t aspeed_mctp_read(struct file *file, char __user *buf, ++ size_t count, loff_t *ppos) ++{ ++ struct mctp_client *client = file->private_data; ++ struct aspeed_mctp *priv = client->priv; ++ struct mctp_pcie_packet *rx_packet; ++ u32 mctp_ctrl; ++ u32 mctp_int_sts; ++ ++ if (count < PCIE_MCTP_MIN_PACKET_SIZE) ++ return -EINVAL; ++ ++ if (count > sizeof(rx_packet->data)) ++ count = sizeof(rx_packet->data); ++ ++ if (priv->miss_mctp_int) { ++ regmap_read(priv->map, ASPEED_MCTP_CTRL, &mctp_ctrl); ++ if (!(mctp_ctrl & RX_CMD_READY)) ++ priv->rx.stopped = true; ++ /* Polling the RX_CMD_RECEIVE_INT to ensure rx_tasklet can find the data */ ++ regmap_read(priv->map, ASPEED_MCTP_INT_STS, &mctp_int_sts); ++ if (mctp_int_sts & RX_CMD_RECEIVE_INT) ++ regmap_write(priv->map, ASPEED_MCTP_INT_STS, ++ mctp_int_sts); ++ } ++ ++ tasklet_hi_schedule(&priv->rx.tasklet); ++ rx_packet = ptr_ring_consume_bh(&client->rx_queue); ++ if (!rx_packet) ++ return -EAGAIN; ++ ++ if (copy_to_user(buf, &rx_packet->data, count)) { ++ dev_err(priv->dev, "copy to user failed\n"); ++ count = -EFAULT; ++ } ++ ++ aspeed_mctp_packet_free(rx_packet); ++ ++ return count; ++} ++ ++static void aspeed_mctp_flush_tx_queue(struct mctp_client *client) ++{ ++ struct mctp_pcie_packet *packet; ++ ++ while ((packet = ptr_ring_consume_bh(&client->tx_queue))) ++ aspeed_mctp_packet_free(packet); ++} ++ ++static void aspeed_mctp_flush_all_tx_queues(struct aspeed_mctp *priv) ++{ ++ struct mctp_client *client; ++ ++ spin_lock_bh(&priv->clients_lock); ++ list_for_each_entry(client, &priv->clients, link) ++ aspeed_mctp_flush_tx_queue(client); ++ spin_unlock_bh(&priv->clients_lock); ++} ++ ++static ssize_t aspeed_mctp_write(struct file *file, const char __user *buf, ++ size_t count, loff_t *ppos) ++{ ++ struct 
mctp_client *client = file->private_data; ++ struct aspeed_mctp *priv = client->priv; ++ struct mctp_pcie_packet *tx_packet; ++ int ret; ++ ++ if (count < PCIE_MCTP_MIN_PACKET_SIZE) ++ return -EINVAL; ++ ++ if (count > sizeof(tx_packet->data)) ++ return -ENOSPC; ++ ++ tx_packet = aspeed_mctp_packet_alloc(GFP_KERNEL); ++ if (!tx_packet) { ++ ret = -ENOMEM; ++ goto out; ++ } ++ ++ if (copy_from_user(&tx_packet->data, buf, count)) { ++ dev_err(priv->dev, "copy from user failed\n"); ++ ret = -EFAULT; ++ goto out_packet; ++ } ++ ++ tx_packet->size = count; ++ ++ ret = aspeed_mctp_send_packet(client, tx_packet); ++ if (ret) ++ goto out_packet; ++ ++ return count; ++ ++out_packet: ++ aspeed_mctp_packet_free(tx_packet); ++out: ++ return ret; ++} ++ ++int aspeed_mctp_add_type_handler(struct mctp_client *client, u8 mctp_type, ++ u16 pci_vendor_id, u16 vdm_type, u16 vdm_mask) ++{ ++ struct aspeed_mctp *priv = client->priv; ++ struct mctp_type_handler *handler, *new_handler; ++ int ret = 0; ++ ++ if (mctp_type <= MCTP_HDR_TYPE_BASE_LAST) { ++ /* Vendor, type and type mask must be zero for types 0-5 */ ++ if (pci_vendor_id != 0 || vdm_type != 0 || vdm_mask != 0) ++ return -EINVAL; ++ } else if (mctp_type == MCTP_HDR_TYPE_VDM_PCI) { ++ /* For Vendor Defined PCI type the vendor ID must be nonzero */ ++ if (pci_vendor_id == 0 || pci_vendor_id == 0xffff) ++ return -EINVAL; ++ } else { ++ return -EINVAL; ++ } ++ ++ new_handler = kzalloc(sizeof(*new_handler), GFP_KERNEL); ++ if (!new_handler) ++ return -ENOMEM; ++ new_handler->mctp_type = mctp_type; ++ new_handler->pci_vendor_id = pci_vendor_id; ++ new_handler->vdm_type = vdm_type & vdm_mask; ++ new_handler->vdm_mask = vdm_mask; ++ new_handler->client = client; ++ ++ spin_lock_bh(&priv->clients_lock); ++ list_for_each_entry(handler, &priv->mctp_type_handlers, link) { ++ if (handler->mctp_type == new_handler->mctp_type && ++ handler->pci_vendor_id == new_handler->pci_vendor_id && ++ handler->vdm_type == new_handler->vdm_type) { ++ if (handler->client != new_handler->client) ++ ret = -EBUSY; ++ kfree(new_handler); ++ goto out_unlock; ++ } ++ } ++ list_add_tail(&new_handler->link, &priv->mctp_type_handlers); ++out_unlock: ++ spin_unlock_bh(&priv->clients_lock); ++ ++ return ret; ++} ++EXPORT_SYMBOL_GPL(aspeed_mctp_add_type_handler); ++ ++static int aspeed_mctp_remove_type_handler(struct mctp_client *client, ++ u8 mctp_type, u16 pci_vendor_id, ++ u16 vdm_type, u16 vdm_mask) ++{ ++ struct aspeed_mctp *priv = client->priv; ++ struct mctp_type_handler *handler, *tmp; ++ int ret = -EINVAL; ++ ++ vdm_type &= vdm_mask; ++ ++ spin_lock_bh(&priv->clients_lock); ++ list_for_each_entry_safe(handler, tmp, &priv->mctp_type_handlers, ++ link) { ++ if (handler->client == client && ++ handler->mctp_type == mctp_type && ++ handler->pci_vendor_id == pci_vendor_id && ++ handler->vdm_type == vdm_type) { ++ list_del(&handler->link); ++ kfree(handler); ++ ret = 0; ++ break; ++ } ++ } ++ spin_unlock_bh(&priv->clients_lock); ++ return ret; ++} ++ ++int aspeed_mctp_register_default_handler(struct mctp_client *client) ++{ ++ struct aspeed_mctp *priv = client->priv; ++ int ret = 0; ++ ++ spin_lock_bh(&priv->clients_lock); ++ ++ if (!priv->default_client) ++ priv->default_client = client; ++ else if (priv->default_client != client) ++ ret = -EBUSY; ++ ++ spin_unlock_bh(&priv->clients_lock); ++ ++ return ret; ++} ++EXPORT_SYMBOL_GPL(aspeed_mctp_register_default_handler); ++ ++static int ++aspeed_mctp_register_type_handler(struct mctp_client *client, ++ void __user *userbuf) ++{ ++ struct 
aspeed_mctp *priv = client->priv; ++ struct aspeed_mctp_type_handler_ioctl handler; ++ ++ if (copy_from_user(&handler, userbuf, sizeof(handler))) { ++ dev_err(priv->dev, "copy from user failed\n"); ++ return -EFAULT; ++ } ++ ++ return aspeed_mctp_add_type_handler(client, handler.mctp_type, ++ handler.pci_vendor_id, ++ handler.vendor_type, ++ handler.vendor_type_mask); ++} ++ ++static int ++aspeed_mctp_unregister_type_handler(struct mctp_client *client, ++ void __user *userbuf) ++{ ++ struct aspeed_mctp *priv = client->priv; ++ struct aspeed_mctp_type_handler_ioctl handler; ++ ++ if (copy_from_user(&handler, userbuf, sizeof(handler))) { ++ dev_err(priv->dev, "copy from user failed\n"); ++ return -EFAULT; ++ } ++ ++ return aspeed_mctp_remove_type_handler(client, handler.mctp_type, ++ handler.pci_vendor_id, ++ handler.vendor_type, ++ handler.vendor_type_mask); ++} ++ ++static int ++aspeed_mctp_filter_eid(struct aspeed_mctp *priv, void __user *userbuf) ++{ ++ struct aspeed_mctp_filter_eid eid; ++ ++ if (copy_from_user(&eid, userbuf, sizeof(eid))) { ++ dev_err(priv->dev, "copy from user failed\n"); ++ return -EFAULT; ++ } ++ ++ if (eid.enable) { ++ regmap_update_bits(priv->map, ASPEED_MCTP_EID, ++ MCTP_EID, eid.eid); ++ regmap_update_bits(priv->map, ASPEED_MCTP_CTRL, ++ MATCHING_EID, MATCHING_EID); ++ } else { ++ regmap_update_bits(priv->map, ASPEED_MCTP_CTRL, ++ MATCHING_EID, 0); ++ } ++ return 0; ++} ++ ++static int aspeed_mctp_get_bdf(struct aspeed_mctp *priv, void __user *userbuf) ++{ ++ struct aspeed_mctp_get_bdf bdf = { _get_bdf(priv) }; ++ ++ if (copy_to_user(userbuf, &bdf, sizeof(bdf))) { ++ dev_err(priv->dev, "copy to user failed\n"); ++ return -EFAULT; ++ } ++ return 0; ++} ++ ++static int ++aspeed_mctp_get_medium_id(struct aspeed_mctp *priv, void __user *userbuf) ++{ ++ struct aspeed_mctp_get_medium_id id = { 0x09 }; /* PCIe revision 2.0 */ ++ ++ if (copy_to_user(userbuf, &id, sizeof(id))) { ++ dev_err(priv->dev, "copy to user failed\n"); ++ return -EFAULT; ++ } ++ return 0; ++} ++ ++static int ++aspeed_mctp_get_mtu(struct aspeed_mctp *priv, void __user *userbuf) ++{ ++ struct aspeed_mctp_get_mtu id = { ASPEED_MCTP_MTU }; ++ ++ if (copy_to_user(userbuf, &id, sizeof(id))) { ++ dev_err(priv->dev, "copy to user failed\n"); ++ return -EFAULT; ++ } ++ return 0; ++} ++ ++int aspeed_mctp_get_eid_bdf(struct mctp_client *client, u8 eid, u16 *bdf) ++{ ++ struct aspeed_mctp_endpoint *endpoint; ++ int ret = -ENOENT; ++ ++ mutex_lock(&client->priv->endpoints_lock); ++ list_for_each_entry(endpoint, &client->priv->endpoints, link) { ++ if (endpoint->data.eid_info.eid == eid) { ++ *bdf = endpoint->data.eid_info.bdf; ++ ret = 0; ++ break; ++ } ++ } ++ mutex_unlock(&client->priv->endpoints_lock); ++ ++ return ret; ++} ++EXPORT_SYMBOL_GPL(aspeed_mctp_get_eid_bdf); ++ ++int aspeed_mctp_get_eid(struct mctp_client *client, u16 bdf, ++ u8 domain_id, u8 *eid) ++{ ++ struct aspeed_mctp_endpoint *endpoint; ++ int ret = -ENOENT; ++ ++ mutex_lock(&client->priv->endpoints_lock); ++ ++ list_for_each_entry(endpoint, &client->priv->endpoints, link) { ++ if (endpoint->data.eid_ext_info.domain_id == domain_id && ++ endpoint->data.eid_ext_info.bdf == bdf) { ++ *eid = endpoint->data.eid_ext_info.eid; ++ ret = 0; ++ break; ++ } ++ } ++ ++ mutex_unlock(&client->priv->endpoints_lock); ++ return ret; ++} ++EXPORT_SYMBOL_GPL(aspeed_mctp_get_eid); ++ ++static int ++aspeed_mctp_get_eid_info(struct aspeed_mctp *priv, void __user *userbuf, ++ enum mctp_address_type addr_format) ++{ ++ int count = 0; ++ int ret = 0; ++ struct 
aspeed_mctp_get_eid_info get_eid; ++ struct aspeed_mctp_endpoint *endpoint; ++ void *user_ptr; ++ size_t count_to_copy; ++ ++ if (copy_from_user(&get_eid, userbuf, sizeof(get_eid))) { ++ dev_err(priv->dev, "copy from user failed\n"); ++ return -EFAULT; ++ } ++ ++ mutex_lock(&priv->endpoints_lock); ++ ++ if (get_eid.count == 0) { ++ count = priv->endpoints_count; ++ goto out_unlock; ++ } ++ ++ user_ptr = u64_to_user_ptr(get_eid.ptr); ++ count_to_copy = get_eid.count > priv->endpoints_count ? ++ priv->endpoints_count : get_eid.count; ++ list_for_each_entry(endpoint, &priv->endpoints, link) { ++ if (endpoint->data.eid_info.eid < get_eid.start_eid) ++ continue; ++ if (count >= count_to_copy) ++ break; ++ ++ if (addr_format == ASPEED_MCTP_EXTENDED_ADDR_FORMAT) ++ ret = copy_to_user(&(((struct aspeed_mctp_eid_ext_info *) ++ user_ptr)[count]), ++ &endpoint->data, ++ sizeof(struct aspeed_mctp_eid_ext_info)); ++ else ++ ret = copy_to_user(&(((struct aspeed_mctp_eid_info *) ++ user_ptr)[count]), ++ &endpoint->data, ++ sizeof(struct aspeed_mctp_eid_info)); ++ ++ if (ret) { ++ dev_err(priv->dev, "copy to user failed\n"); ++ ret = -EFAULT; ++ goto out_unlock; ++ } ++ count++; ++ } ++ ++out_unlock: ++ get_eid.count = count; ++ if (copy_to_user(userbuf, &get_eid, sizeof(get_eid))) { ++ dev_err(priv->dev, "copy to user failed\n"); ++ ret = -EFAULT; ++ } ++ ++ mutex_unlock(&priv->endpoints_lock); ++ return ret; ++} ++ ++static int ++eid_info_cmp(void *priv, const struct list_head *a, const struct list_head *b) ++{ ++ struct aspeed_mctp_endpoint *endpoint_a; ++ struct aspeed_mctp_endpoint *endpoint_b; ++ ++ if (a == b) ++ return 0; ++ ++ endpoint_a = list_entry(a, typeof(*endpoint_a), link); ++ endpoint_b = list_entry(b, typeof(*endpoint_b), link); ++ ++ if (endpoint_a->data.eid_info.eid < endpoint_b->data.eid_info.eid) ++ return -1; ++ else if (endpoint_a->data.eid_info.eid > endpoint_b->data.eid_info.eid) ++ return 1; ++ ++ return 0; ++} ++ ++static void aspeed_mctp_eid_info_list_remove(struct list_head *list) ++{ ++ struct aspeed_mctp_endpoint *endpoint; ++ struct aspeed_mctp_endpoint *tmp; ++ ++ list_for_each_entry_safe(endpoint, tmp, list, link) { ++ list_del(&endpoint->link); ++ kfree(endpoint); ++ } ++} ++ ++static bool ++aspeed_mctp_eid_info_list_valid(struct list_head *list) ++{ ++ struct aspeed_mctp_endpoint *endpoint; ++ struct aspeed_mctp_endpoint *next; ++ ++ list_for_each_entry(endpoint, list, link) { ++ next = list_next_entry(endpoint, link); ++ if (&next->link == list) ++ break; ++ ++ /* duplicted eids */ ++ if (next->data.eid_info.eid == endpoint->data.eid_info.eid) ++ return false; ++ } ++ ++ return true; ++} ++ ++static int ++aspeed_mctp_set_eid_info(struct aspeed_mctp *priv, void __user *userbuf, ++ enum mctp_address_type addr_format) ++{ ++ struct list_head list = LIST_HEAD_INIT(list); ++ struct aspeed_mctp_set_eid_info set_eid; ++ void *user_ptr; ++ struct aspeed_mctp_endpoint *endpoint; ++ int ret = 0; ++ u8 eid = 0; ++ size_t i; ++ ++ if (copy_from_user(&set_eid, userbuf, sizeof(set_eid))) { ++ dev_err(priv->dev, "copy from user failed\n"); ++ return -EFAULT; ++ } ++ ++ if (set_eid.count > ASPEED_MCTP_EID_INFO_MAX) ++ return -EINVAL; ++ ++ user_ptr = u64_to_user_ptr(set_eid.ptr); ++ for (i = 0; i < set_eid.count; i++) { ++ endpoint = kzalloc(sizeof(*endpoint), GFP_KERNEL); ++ if (!endpoint) { ++ ret = -ENOMEM; ++ goto out; ++ } ++ memset(endpoint, 0, sizeof(*endpoint)); ++ ++ if (addr_format == ASPEED_MCTP_EXTENDED_ADDR_FORMAT) ++ ret = copy_from_user(&endpoint->data, ++ &(((struct 
aspeed_mctp_eid_ext_info *) ++ user_ptr)[i]), ++ sizeof(struct aspeed_mctp_eid_ext_info)); ++ else ++ ret = copy_from_user(&endpoint->data, ++ &(((struct aspeed_mctp_eid_info *) ++ user_ptr)[i]), ++ sizeof(struct aspeed_mctp_eid_info)); ++ ++ if (ret) { ++ dev_err(priv->dev, "copy from user failed\n"); ++ kfree(endpoint); ++ ret = -EFAULT; ++ goto out; ++ } ++ ++ /* Detect self EID */ ++ if (_get_bdf(priv) == endpoint->data.eid_info.bdf) { ++ /* ++ * XXX Use smallest EID with matching BDF. ++ * On some platforms there could be multiple endpoints ++ * with same BDF in routing table. ++ */ ++ if (eid == 0 || endpoint->data.eid_info.eid < eid) ++ eid = endpoint->data.eid_info.eid; ++ } ++ ++ list_add_tail(&endpoint->link, &list); ++ } ++ ++ list_sort(NULL, &list, eid_info_cmp); ++ if (!aspeed_mctp_eid_info_list_valid(&list)) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ mutex_lock(&priv->endpoints_lock); ++ if (list_empty(&priv->endpoints)) ++ list_splice_init(&list, &priv->endpoints); ++ else ++ list_swap(&list, &priv->endpoints); ++ priv->endpoints_count = set_eid.count; ++ priv->eid = eid; ++ mutex_unlock(&priv->endpoints_lock); ++out: ++ aspeed_mctp_eid_info_list_remove(&list); ++ return ret; ++} ++ ++static int aspeed_mctp_set_own_eid(struct aspeed_mctp *priv, void __user *userbuf) ++{ ++ struct aspeed_mctp_set_own_eid data; ++ ++ if (copy_from_user(&data, userbuf, sizeof(data))) { ++ dev_err(priv->dev, "copy from user failed\n"); ++ return -EFAULT; ++ } ++ ++ priv->eid = data.eid; ++ ++ return 0; ++} ++ ++static long ++aspeed_mctp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) ++{ ++ struct mctp_client *client = file->private_data; ++ struct aspeed_mctp *priv = client->priv; ++ void __user *userbuf = (void __user *)arg; ++ int ret; ++ ++ switch (cmd) { ++ case ASPEED_MCTP_IOCTL_FILTER_EID: ++ ret = aspeed_mctp_filter_eid(priv, userbuf); ++ break; ++ ++ case ASPEED_MCTP_IOCTL_GET_BDF: ++ ret = aspeed_mctp_get_bdf(priv, userbuf); ++ break; ++ ++ case ASPEED_MCTP_IOCTL_GET_MEDIUM_ID: ++ ret = aspeed_mctp_get_medium_id(priv, userbuf); ++ break; ++ ++ case ASPEED_MCTP_IOCTL_GET_MTU: ++ ret = aspeed_mctp_get_mtu(priv, userbuf); ++ break; ++ ++ case ASPEED_MCTP_IOCTL_REGISTER_DEFAULT_HANDLER: ++ ret = aspeed_mctp_register_default_handler(client); ++ break; ++ ++ case ASPEED_MCTP_IOCTL_REGISTER_TYPE_HANDLER: ++ ret = aspeed_mctp_register_type_handler(client, userbuf); ++ break; ++ ++ case ASPEED_MCTP_IOCTL_UNREGISTER_TYPE_HANDLER: ++ ret = aspeed_mctp_unregister_type_handler(client, userbuf); ++ break; ++ ++ case ASPEED_MCTP_IOCTL_GET_EID_INFO: ++ ret = aspeed_mctp_get_eid_info(priv, userbuf, ASPEED_MCTP_GENERIC_ADDR_FORMAT); ++ break; ++ ++ case ASPEED_MCTP_IOCTL_GET_EID_EXT_INFO: ++ ret = aspeed_mctp_get_eid_info(priv, userbuf, ASPEED_MCTP_EXTENDED_ADDR_FORMAT); ++ break; ++ ++ case ASPEED_MCTP_IOCTL_SET_EID_INFO: ++ ret = aspeed_mctp_set_eid_info(priv, userbuf, ASPEED_MCTP_GENERIC_ADDR_FORMAT); ++ break; ++ ++ case ASPEED_MCTP_IOCTL_SET_EID_EXT_INFO: ++ ret = aspeed_mctp_set_eid_info(priv, userbuf, ASPEED_MCTP_EXTENDED_ADDR_FORMAT); ++ break; ++ ++ case ASPEED_MCTP_IOCTL_SET_OWN_EID: ++ ret = aspeed_mctp_set_own_eid(priv, userbuf); ++ break; ++ ++ default: ++ dev_err(priv->dev, "Command not found\n"); ++ ret = -ENOTTY; ++ } ++ ++ return ret; ++} ++ ++static __poll_t aspeed_mctp_poll(struct file *file, ++ struct poll_table_struct *pt) ++{ ++ struct mctp_client *client = file->private_data; ++ __poll_t ret = 0; ++ struct aspeed_mctp *priv = client->priv; ++ struct mctp_channel *rx 
= &priv->rx; ++ u32 mctp_ctrl; ++ u32 mctp_int_sts; ++ ++ if (priv->miss_mctp_int) { ++ regmap_read(priv->map, ASPEED_MCTP_CTRL, &mctp_ctrl); ++ if (!(mctp_ctrl & RX_CMD_READY)) ++ rx->stopped = true; ++ /* Polling the RX_CMD_RECEIVE_INT to ensure rx_tasklet can find the data */ ++ regmap_read(priv->map, ASPEED_MCTP_INT_STS, &mctp_int_sts); ++ if (mctp_int_sts & RX_CMD_RECEIVE_INT) ++ regmap_write(priv->map, ASPEED_MCTP_INT_STS, ++ mctp_int_sts); ++ } ++ ++ tasklet_hi_schedule(&priv->rx.tasklet); ++ poll_wait(file, &client->wait_queue, pt); ++ ++ if (!ptr_ring_full_bh(&client->tx_queue)) ++ ret |= EPOLLOUT; ++ ++ if (__ptr_ring_peek(&client->rx_queue)) ++ ret |= EPOLLIN; ++ ++ return ret; ++} ++ ++#ifdef CONFIG_MCTP_TRANSPORT_PCIE_VDM ++static int aspeed_mctp_pcie_vdm_op_send_pkt(struct device *dev, ++ u8 *data, size_t size) ++{ ++ struct mctp_pcie_packet *packet; ++ struct platform_device *pdev; ++ struct aspeed_mctp *priv; ++ int rc; ++ ++ pdev = to_platform_device(dev); ++ priv = platform_get_drvdata(pdev); ++ // freed at aspeed-mctp tx tasklet or send failure ++ packet = aspeed_mctp_packet_alloc(GFP_KERNEL); ++ ++ if (!packet) { ++ dev_err(priv->dev, "failed to alloc packet\n"); ++ return -ENOMEM; ++ } ++ ++ memcpy((u8 *)&packet->data.hdr, data, PCIE_VDM_HDR_SIZE); ++ memcpy((u8 *)&packet->data.payload, data + PCIE_VDM_HDR_SIZE, size); ++ packet->size = (size + PCIE_VDM_HDR_SIZE); ++ ++ rc = aspeed_mctp_send_packet(priv->default_client, packet); ++ if (rc) { ++ dev_err(priv->dev, "failed to send packet\n"); ++ aspeed_mctp_packet_free(packet); ++ return rc; ++ } ++ return 0; ++} ++ ++static u8 *aspeed_mctp_pcie_vdm_op_recv_pkt(struct device *dev) ++{ ++ struct platform_device *pdev; ++ struct aspeed_mctp *priv; ++ struct mctp_pcie_packet *rx_packet; ++ ++ pdev = to_platform_device(dev); ++ priv = platform_get_drvdata(pdev); ++ rx_packet = aspeed_mctp_receive_packet(priv->default_client, 0); ++ ++ if (IS_ERR(rx_packet)) { ++ if (PTR_ERR(rx_packet) == -ETIME) { ++ dev_dbg(priv->dev, "no packet received\n"); ++ } else { ++ dev_err(priv->dev, "failed to receive packet: %ld\n", ++ PTR_ERR(rx_packet)); ++ } ++ return (u8 *)rx_packet; ++ } ++ return (u8 *)&rx_packet->data; ++} ++ ++static void aspeed_mctp_pcie_vdm_op_uninit(struct device *dev) ++{ ++ struct platform_device *pdev; ++ struct aspeed_mctp *priv; ++ ++ pdev = to_platform_device(dev); ++ priv = platform_get_drvdata(pdev); ++ ++ aspeed_mctp_flush_all_tx_queues(priv); ++ aspeed_mctp_flush_rx_queue(priv->default_client); ++ aspeed_mctp_delete_client(priv->default_client); ++} ++ ++static const struct mctp_pcie_vdm_ops aspeed_mctp_pcie_vdm_ops = { ++ .send_packet = aspeed_mctp_pcie_vdm_op_send_pkt, ++ .recv_packet = aspeed_mctp_pcie_vdm_op_recv_pkt, ++ .free_packet = aspeed_mctp_packet_free, ++ .uninit = aspeed_mctp_pcie_vdm_op_uninit, ++}; ++ ++#endif ++ ++static const struct file_operations aspeed_mctp_fops = { ++ .owner = THIS_MODULE, ++ .open = aspeed_mctp_open, ++ .release = aspeed_mctp_release, ++ .read = aspeed_mctp_read, ++ .write = aspeed_mctp_write, ++ .unlocked_ioctl = aspeed_mctp_ioctl, ++ .poll = aspeed_mctp_poll, ++}; ++ ++static const struct regmap_config aspeed_mctp_regmap_cfg = { ++ .reg_bits = 32, ++ .reg_stride = 4, ++ .val_bits = 32, ++ .max_register = ASPEED_G7_MCTP_PCIE_BDF, ++}; ++ ++struct device_type aspeed_mctp_type = { ++ .name = "aspeed-mctp", ++}; ++ ++static void aspeed_mctp_send_pcie_uevent(struct kobject *kobj, bool ready) ++{ ++ char buf[32]; ++ char *envp[2]; ++ ++ snprintf(buf, sizeof(buf), 
ASPEED_MCTP_READY "=%d", ready ? 1 : 0); ++ envp[0] = buf; ++ envp[1] = NULL; ++ ++ kobject_uevent_env(kobj, KOBJ_CHANGE, envp); ++} ++ ++static void aspeed_mctp_irq_enable(struct aspeed_mctp *priv) ++{ ++ u32 enable = TX_CMD_SENT_INT | TX_CMD_WRONG_INT | ++ RX_CMD_RECEIVE_INT | RX_CMD_NO_MORE_INT; ++ ++ regmap_write(priv->map, ASPEED_MCTP_INT_EN, enable); ++} ++ ++static void aspeed_mctp_irq_disable(struct aspeed_mctp *priv) ++{ ++ regmap_write(priv->map, ASPEED_MCTP_INT_EN, 0); ++} ++ ++static void aspeed_mctp_pcie_setup(struct aspeed_mctp *priv) ++{ ++ int ret; ++ u8 tx_max_payload_size; ++ u8 rx_max_payload_size; ++ struct kobject *kobj = &priv->mctp_miscdev.this_device->kobj; ++ ++ ret = _get_bdf(priv); ++ ++ if (ret >= 0) { ++ cancel_delayed_work(&priv->pcie.rst_dwork); ++ if (priv->match_data->need_address_mapping) ++ regmap_update_bits(priv->map, ASPEED_MCTP_EID, ++ MEMORY_SPACE_MAPPING, BIT(31)); ++ ++ /* Only set TX MPS since HW will parse RX packet to decide how many bytes to receive ++ * based on the length field in PCIe VDM header. ++ */ ++ if (priv->match_data->dma_need_64bits_width) { ++ tx_max_payload_size = ++ FIELD_GET(TX_MAX_PAYLOAD_SIZE_MASK, ++ ilog2(ASPEED_MCTP_MTU >> 6)); ++ } else { ++ /* ++ * In ast2600, tx som and eom will not match expected result. ++ * e.g. When Maximum Transmit Unit (MTU) set to 64 byte, and then transfer ++ * size set between 61 ~ 124 (MTU-3 ~ 2*MTU-4), the engine will set all ++ * packet vdm header eom to 1, no matter what it setted. To fix that ++ * issue, the driver set MTU to next level(e.g. 64 to 128). ++ */ ++ tx_max_payload_size = ++ FIELD_GET(TX_MAX_PAYLOAD_SIZE_MASK, ++ fls(ASPEED_MCTP_MTU >> 6)); ++ } ++ ++ regmap_update_bits(priv->map, ASPEED_MCTP_ENGINE_CTRL, ++ TX_MAX_PAYLOAD_SIZE_MASK | RX_MAX_PAYLOAD_SIZE_MASK, ++ (rx_max_payload_size << RX_MAX_PAYLOAD_SIZE_SHIFT) | tx_max_payload_size); ++ ++ aspeed_mctp_flush_all_tx_queues(priv); ++ if (!priv->miss_mctp_int) { ++ aspeed_mctp_irq_enable(priv); ++ } else { ++ if (priv->rx_det_period_us) ++ schedule_delayed_work(&priv->rx_det_dwork, ++ usecs_to_jiffies(priv->rx_det_period_us)); ++ } ++ aspeed_mctp_rx_trigger(&priv->rx); ++ aspeed_mctp_send_pcie_uevent(kobj, true); ++ } else { ++ schedule_delayed_work(&priv->pcie.rst_dwork, ++ msecs_to_jiffies(1000)); ++ } ++} ++ ++static void aspeed_mctp_reset_work(struct work_struct *work) ++{ ++ struct aspeed_mctp *priv = container_of(work, typeof(*priv), ++ pcie.rst_dwork.work); ++ struct kobject *kobj = &priv->mctp_miscdev.this_device->kobj; ++ ++ if (priv->pcie.need_uevent) { ++ aspeed_mctp_send_pcie_uevent(kobj, false); ++ priv->pcie.need_uevent = false; ++ } ++ ++ aspeed_mctp_pcie_setup(priv); ++} ++ ++static void aspeed_mctp_rx_detect_work(struct work_struct *work) ++{ ++ struct aspeed_mctp *priv = ++ container_of(work, typeof(*priv), rx_det_dwork.work); ++ ++ tasklet_hi_schedule(&priv->rx.tasklet); ++ schedule_delayed_work(&priv->rx_det_dwork, ++ usecs_to_jiffies(priv->rx_det_period_us)); ++} ++ ++static void aspeed_mctp_channels_init(struct aspeed_mctp *priv) ++{ ++ aspeed_mctp_rx_chan_init(&priv->rx); ++ aspeed_mctp_tx_chan_init(&priv->tx); ++} ++ ++static irqreturn_t aspeed_mctp_irq_handler(int irq, void *arg) ++{ ++ struct aspeed_mctp *priv = arg; ++ u32 handled = 0; ++ u32 status; ++ ++ regmap_read(priv->map, ASPEED_MCTP_INT_STS, &status); ++ regmap_write(priv->map, ASPEED_MCTP_INT_STS, status); ++ ++ if (status & TX_CMD_SENT_INT) { ++ tasklet_hi_schedule(&priv->tx.tasklet); ++ if (!priv->match_data->fifo_auto_surround) ++ 
priv->tx.rd_ptr = (priv->tx.rd_ptr + 1) % TX_PACKET_COUNT; ++ handled |= TX_CMD_SENT_INT; ++ } ++ ++ if (status & TX_CMD_WRONG_INT) { ++ /* TODO: print the actual command */ ++ dev_warn(priv->dev, "TX wrong"); ++ ++ handled |= TX_CMD_WRONG_INT; ++ } ++ ++ if (status & RX_CMD_RECEIVE_INT) { ++ tasklet_hi_schedule(&priv->rx.tasklet); ++ ++ handled |= RX_CMD_RECEIVE_INT; ++ } ++ ++ if (status & RX_CMD_NO_MORE_INT) { ++ dev_dbg(priv->dev, "RX full"); ++ priv->rx.stopped = true; ++ tasklet_hi_schedule(&priv->rx.tasklet); ++ ++ handled |= RX_CMD_NO_MORE_INT; ++ } ++ ++ if (!handled) ++ return IRQ_NONE; ++ ++ return IRQ_HANDLED; ++} ++ ++static irqreturn_t aspeed_mctp_pcie_rst_irq_handler(int irq, void *arg) ++{ ++ struct aspeed_mctp *priv = arg; ++ ++ aspeed_mctp_channels_init(priv); ++ ++ priv->pcie.need_uevent = true; ++ priv->eid = 0; ++ ++ schedule_delayed_work(&priv->pcie.rst_dwork, 0); ++ ++ return IRQ_HANDLED; ++} ++ ++static void aspeed_mctp_drv_init(struct aspeed_mctp *priv) ++{ ++ INIT_LIST_HEAD(&priv->clients); ++ INIT_LIST_HEAD(&priv->mctp_type_handlers); ++ INIT_LIST_HEAD(&priv->endpoints); ++ ++ spin_lock_init(&priv->clients_lock); ++ mutex_init(&priv->endpoints_lock); ++ ++ INIT_DELAYED_WORK(&priv->pcie.rst_dwork, aspeed_mctp_reset_work); ++ ++ tasklet_init(&priv->tx.tasklet, aspeed_mctp_tx_tasklet, ++ (unsigned long)&priv->tx); ++ tasklet_init(&priv->rx.tasklet, aspeed_mctp_rx_tasklet, ++ (unsigned long)&priv->rx); ++} ++ ++static void aspeed_mctp_drv_fini(struct aspeed_mctp *priv) ++{ ++ aspeed_mctp_eid_info_list_remove(&priv->endpoints); ++ tasklet_disable(&priv->tx.tasklet); ++ tasklet_kill(&priv->tx.tasklet); ++ tasklet_disable(&priv->rx.tasklet); ++ tasklet_kill(&priv->rx.tasklet); ++ ++ cancel_delayed_work_sync(&priv->pcie.rst_dwork); ++ if (priv->miss_mctp_int) ++ cancel_delayed_work_sync(&priv->rx_det_dwork); ++} ++ ++static int aspeed_mctp_resources_init(struct aspeed_mctp *priv) ++{ ++ struct platform_device *pdev = to_platform_device(priv->dev); ++ void __iomem *regs; ++ ++ regs = devm_platform_ioremap_resource(pdev, 0); ++ if (IS_ERR(regs)) { ++ dev_err(priv->dev, "Failed to get regmap!\n"); ++ return PTR_ERR(regs); ++ } ++ ++ priv->map = devm_regmap_init_mmio(priv->dev, regs, ++ &aspeed_mctp_regmap_cfg); ++ if (IS_ERR(priv->map)) ++ return PTR_ERR(priv->map); ++ ++ priv->reset = ++ priv->rc_f ? 
++ devm_reset_control_get_by_index(priv->dev, 0) : ++ devm_reset_control_get_shared_by_index(priv->dev, 0); ++ if (IS_ERR(priv->reset)) { ++ dev_err(priv->dev, "Failed to get reset!\n"); ++ return PTR_ERR(priv->reset); ++ } ++ ++ if (priv->rc_f) { ++ priv->reset_dma = devm_reset_control_get_shared_by_index(priv->dev, 1); ++ if (IS_ERR(priv->reset_dma)) { ++ dev_err(priv->dev, "Failed to get ep reset!\n"); ++ return PTR_ERR(priv->reset_dma); ++ } ++ } ++ priv->pcie.map = ++ syscon_regmap_lookup_by_phandle(priv->dev->of_node, ++ "aspeed,pcieh"); ++ if (IS_ERR(priv->pcie.map)) { ++ dev_err(priv->dev, "Failed to find PCIe Host regmap!\n"); ++ return PTR_ERR(priv->pcie.map); ++ } ++ ++ platform_set_drvdata(pdev, priv); ++ ++ return 0; ++} ++ ++static void aspeed_release_rmem(void *d) ++{ ++ of_reserved_mem_device_release(d); ++} ++ ++static int aspeed_mctp_dma_init(struct aspeed_mctp *priv) ++{ ++ struct mctp_channel *tx = &priv->tx; ++ struct mctp_channel *rx = &priv->rx; ++ size_t alloc_size; ++ int ret = -ENOMEM; ++ ++ BUILD_BUG_ON(TX_PACKET_COUNT >= TX_MAX_PACKET_COUNT); ++ BUILD_BUG_ON(RX_PACKET_COUNT >= RX_MAX_PACKET_COUNT); ++ ++ ret = of_reserved_mem_device_init(priv->dev); ++ if (ret) { ++ dev_err(priv->dev, "device does not have specific DMA pool: %d\n", ++ ret); ++ return ret; ++ } ++ ++ ret = devm_add_action_or_reset(priv->dev, aspeed_release_rmem, ++ priv->dev); ++ if (ret) ++ return ret; ++ ++ ret = dma_set_mask_and_coherent(priv->dev, DMA_BIT_MASK(64)); ++ if (ret) { ++ dev_err(priv->dev, "cannot set 64-bits DMA mask\n"); ++ return ret; ++ } ++ ++ alloc_size = PAGE_ALIGN(priv->rx_packet_count * priv->match_data->packet_unit_size); ++ rx->data.vaddr = ++ dma_alloc_coherent(priv->dev, alloc_size, &rx->data.dma_handle, GFP_KERNEL); ++ ++ if (!rx->data.vaddr) ++ return -ENOMEM; ++ ++ alloc_size = PAGE_ALIGN(priv->rx_packet_count * priv->match_data->rx_cmd_size); ++ rx->cmd.vaddr = dma_alloc_coherent(priv->dev, alloc_size, &rx->cmd.dma_handle, GFP_KERNEL); ++ ++ if (!rx->cmd.vaddr) ++ goto out_rx_cmd; ++ ++ alloc_size = PAGE_ALIGN(TX_PACKET_COUNT * priv->match_data->packet_unit_size); ++ tx->data.vaddr = ++ dma_alloc_coherent(priv->dev, alloc_size, &tx->data.dma_handle, GFP_KERNEL); ++ ++ if (!tx->data.vaddr) ++ goto out_tx_data; ++ alloc_size = PAGE_ALIGN(TX_PACKET_COUNT * priv->match_data->tx_cmd_size); ++ tx->cmd.vaddr = dma_alloc_coherent(priv->dev, alloc_size, &tx->cmd.dma_handle, GFP_KERNEL); ++ ++ if (!tx->cmd.vaddr) ++ goto out_tx_cmd; ++ ++ return 0; ++out_tx_cmd: ++ alloc_size = PAGE_ALIGN(TX_PACKET_COUNT * ++ priv->match_data->packet_unit_size); ++ dma_free_coherent(priv->dev, alloc_size, tx->data.vaddr, ++ tx->data.dma_handle); ++ ++out_tx_data: ++ alloc_size = PAGE_ALIGN(priv->rx_packet_count * ++ priv->match_data->rx_cmd_size); ++ dma_free_coherent(priv->dev, alloc_size, rx->cmd.vaddr, ++ rx->cmd.dma_handle); ++ ++out_rx_cmd: ++ alloc_size = PAGE_ALIGN(priv->rx_packet_count * ++ priv->match_data->packet_unit_size); ++ dma_free_coherent(priv->dev, alloc_size, rx->data.vaddr, ++ rx->data.dma_handle); ++ ++ return -ENOMEM; ++} ++ ++static void aspeed_mctp_dma_fini(struct aspeed_mctp *priv) ++{ ++ struct mctp_channel *tx = &priv->tx; ++ struct mctp_channel *rx = &priv->rx; ++ size_t free_size; ++ ++ free_size = PAGE_ALIGN(TX_PACKET_COUNT * priv->match_data->tx_cmd_size); ++ dma_free_coherent(priv->dev, free_size, tx->cmd.vaddr, ++ tx->cmd.dma_handle); ++ ++ free_size = PAGE_ALIGN(priv->rx_packet_count * ++ priv->match_data->rx_cmd_size); ++ dma_free_coherent(priv->dev, 
free_size, rx->cmd.vaddr, ++ rx->cmd.dma_handle); ++ ++ free_size = PAGE_ALIGN(TX_PACKET_COUNT * ++ priv->match_data->packet_unit_size); ++ dma_free_coherent(priv->dev, free_size, tx->data.vaddr, ++ tx->data.dma_handle); ++ ++ free_size = PAGE_ALIGN(priv->rx_packet_count * ++ priv->match_data->packet_unit_size); ++ dma_free_coherent(priv->dev, free_size, rx->data.vaddr, ++ rx->data.dma_handle); ++} ++ ++static int aspeed_mctp_irq_init(struct aspeed_mctp *priv) ++{ ++ struct platform_device *pdev = to_platform_device(priv->dev); ++ int irq, ret; ++ ++ irq = platform_get_irq_byname_optional(pdev, "mctp"); ++ if (irq < 0) { ++ /* mctp irq is option */ ++ priv->miss_mctp_int = 1; ++ INIT_DELAYED_WORK(&priv->rx_det_dwork, aspeed_mctp_rx_detect_work); ++ } else { ++ ret = devm_request_irq(priv->dev, irq, aspeed_mctp_irq_handler, ++ IRQF_SHARED, dev_name(&pdev->dev), priv); ++ if (ret) ++ return ret; ++ aspeed_mctp_irq_enable(priv); ++ } ++ irq = platform_get_irq_byname(pdev, "pcie"); ++ if (!irq) ++ return -ENODEV; ++ ++ ret = devm_request_irq(priv->dev, irq, aspeed_mctp_pcie_rst_irq_handler, ++ IRQF_SHARED, dev_name(&pdev->dev), priv); ++ if (ret) ++ return ret; ++ ++ return 0; ++} ++ ++static int aspeed_mctp_hw_reset(struct aspeed_mctp *priv) ++{ ++ int ret = 0; ++ ++ ret = reset_control_deassert(priv->reset); ++ if (ret) { ++ dev_warn(priv->dev, "Failed to deassert reset\n"); ++ return ret; ++ } ++ ++ if (priv->rc_f) { ++ ret = reset_control_deassert(priv->reset_dma); ++ if (ret) { ++ dev_warn(priv->dev, "Failed to deassert ep reset\n"); ++ return ret; ++ } ++ } ++ ++ if (priv->match_data->dma_need_64bits_width) ++ ret = pcie_vdm_enable(priv->dev); ++ ++ return ret; ++} ++ ++static int aspeed_mctp_probe(struct platform_device *pdev) ++{ ++ struct aspeed_mctp *priv; ++ int ret, id; ++ const char *name; ++ ++ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); ++ if (!priv) { ++ ret = -ENOMEM; ++ goto out; ++ } ++ priv->dev = &pdev->dev; ++ priv->rc_f = ++ of_find_property(priv->dev->of_node, "pcie_rc", NULL) ? 
1 : 0; ++ priv->match_data = of_device_get_match_data(priv->dev); ++ ++ ret = device_property_read_u32(priv->dev, "aspeed,rx-packet-count", ++ &priv->rx_packet_count); ++ if (ret) { ++ priv->rx_packet_count = RX_PACKET_COUNT; ++ } else if (priv->rx_packet_count % 4 || ++ priv->rx_packet_count >= RX_MAX_PACKET_COUNT) { ++ dev_err(priv->dev, ++ "The aspeed,rx-packet-count:%d should be 4-aligned and less than %ld", ++ priv->rx_packet_count, RX_MAX_PACKET_COUNT); ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ ret = device_property_read_u32(priv->dev, "aspeed,rx-ring-count", ++ &priv->rx_ring_count); ++ if (ret) ++ priv->rx_ring_count = RX_RING_COUNT; ++ ++ ret = device_property_read_u32(priv->dev, "aspeed,tx-ring-count", ++ &priv->tx_ring_count); ++ if (ret) ++ priv->tx_ring_count = TX_RING_COUNT; ++ ++ ret = device_property_read_u32(priv->dev, "aspeed,rx-det-period-us", ++ &priv->rx_det_period_us); ++ if (ret) ++ priv->rx_det_period_us = 1000; ++ ++ aspeed_mctp_drv_init(priv); ++ ++ ret = aspeed_mctp_resources_init(priv); ++ if (ret) { ++ dev_err(priv->dev, "Failed to init resources\n"); ++ goto out_drv; ++ } ++ ++#ifdef CONFIG_MCTP_TRANSPORT_PCIE_VDM ++ struct net_device *ndev; ++ struct mctp_client *client; ++ ++ /** use priv's default client to send/receive mctp packets */ ++ client = aspeed_mctp_create_client(priv); ++ aspeed_mctp_register_default_handler(client); ++ ++ ndev = mctp_pcie_vdm_add_dev(priv->dev, &aspeed_mctp_pcie_vdm_ops); ++ if (IS_ERR(ndev)) { ++ dev_err(priv->dev, "Failed to add mctp pcie vdm device Err %ld\n", PTR_ERR(ndev)); ++ goto out_drv; ++ } ++ priv->ndev = ndev; ++#endif ++ ++ ret = aspeed_mctp_dma_init(priv); ++ if (ret) { ++ dev_err(priv->dev, "Failed to init DMA\n"); ++ goto out_drv; ++ } ++ ++ ret = aspeed_mctp_hw_reset(priv); ++ if (ret) ++ goto out_drv; ++ ++ aspeed_mctp_channels_init(priv); ++ ++ id = of_alias_get_id(priv->dev->of_node, "mctp"); ++ if (id < 0) ++ return id; ++ priv->mctp_miscdev.parent = priv->dev; ++ priv->mctp_miscdev.minor = MISC_DYNAMIC_MINOR; ++ priv->mctp_miscdev.name = devm_kasprintf(priv->dev, GFP_KERNEL, "aspeed-mctp%d", id); ++ priv->mctp_miscdev.fops = &aspeed_mctp_fops; ++ ret = misc_register(&priv->mctp_miscdev); ++ if (ret) { ++ dev_err(priv->dev, "Failed to register miscdev\n"); ++ goto out_dma; ++ } ++ priv->mctp_miscdev.this_device->type = &aspeed_mctp_type; ++ ++ ret = aspeed_mctp_irq_init(priv); ++ if (ret) { ++ dev_err(priv->dev, "Failed to init IRQ!\n"); ++ goto out_dma; ++ } ++ aspeed_mctp_pcie_setup(priv); ++ ++ name = devm_kasprintf(priv->dev, GFP_KERNEL, "peci-mctp%d", id); ++ priv->peci_mctp = ++ platform_device_register_data(priv->dev, name, PLATFORM_DEVID_NONE, NULL, 0); ++ if (IS_ERR(priv->peci_mctp)) ++ dev_err(priv->dev, "Failed to register peci-mctp device\n"); ++ ++ return 0; ++ ++out_dma: ++ aspeed_mctp_dma_fini(priv); ++out_drv: ++ aspeed_mctp_drv_fini(priv); ++out: ++ dev_err(&pdev->dev, "Failed to probe Aspeed MCTP: %d\n", ret); ++ return ret; ++} ++ ++static void aspeed_mctp_remove(struct platform_device *pdev) ++{ ++ struct aspeed_mctp *priv = platform_get_drvdata(pdev); ++ ++#ifdef CONFIG_MCTP_TRANSPORT_PCIE_VDM ++ mctp_pcie_vdm_remove_dev(priv->ndev); ++#endif ++ ++ platform_device_unregister(priv->peci_mctp); ++ ++ misc_deregister(&priv->mctp_miscdev); ++ ++ aspeed_mctp_irq_disable(priv); ++ ++ aspeed_mctp_dma_fini(priv); ++ ++ aspeed_mctp_drv_fini(priv); ++} ++ ++static const struct aspeed_mctp_match_data ast2500_mctp_match_data = { ++ .rx_cmd_size = sizeof(struct aspeed_mctp_rx_cmd), ++ .tx_cmd_size = 
sizeof(struct aspeed_mctp_tx_cmd), ++ .packet_unit_size = 128, ++ .need_address_mapping = true, ++ .vdm_hdr_direct_xfer = false, ++ .fifo_auto_surround = false, ++}; ++ ++static const struct aspeed_mctp_match_data ast2600_mctp_match_data = { ++ .rx_cmd_size = sizeof(u32), ++ .tx_cmd_size = sizeof(struct aspeed_mctp_tx_cmd), ++ .packet_unit_size = sizeof(struct mctp_pcie_packet_data), ++ .need_address_mapping = false, ++ .vdm_hdr_direct_xfer = true, ++ .fifo_auto_surround = true, ++}; ++ ++static const struct aspeed_mctp_match_data ast2700_mctp0_match_data = { ++ .rx_cmd_size = sizeof(struct aspeed_mctp_rx_cmd), ++ .tx_cmd_size = sizeof(struct aspeed_g7_mctp_tx_cmd), ++ .packet_unit_size = sizeof(struct mctp_pcie_packet_data), ++ .need_address_mapping = false, ++ .vdm_hdr_direct_xfer = true, ++ .fifo_auto_surround = true, ++ .dma_need_64bits_width = true, ++ .scu_pcie_ctrl_offset = ASPEED_G7_SCU_PCIE0_CTRL_OFFSET, ++}; ++ ++static const struct aspeed_mctp_match_data ast2700_mctp1_match_data = { ++ .rx_cmd_size = sizeof(struct aspeed_mctp_rx_cmd), ++ .tx_cmd_size = sizeof(struct aspeed_g7_mctp_tx_cmd), ++ .packet_unit_size = sizeof(struct mctp_pcie_packet_data), ++ .need_address_mapping = false, ++ .vdm_hdr_direct_xfer = true, ++ .fifo_auto_surround = true, ++ .dma_need_64bits_width = true, ++ .scu_pcie_ctrl_offset = ASPEED_G7_SCU_PCIE1_CTRL_OFFSET, ++}; ++ ++static const struct of_device_id aspeed_mctp_match_table[] = { ++ { .compatible = "aspeed,ast2500-mctp", .data = &ast2500_mctp_match_data}, ++ { .compatible = "aspeed,ast2600-mctp", .data = &ast2600_mctp_match_data}, ++ { .compatible = "aspeed,ast2700-mctp0", .data = &ast2700_mctp0_match_data}, ++ { .compatible = "aspeed,ast2700-mctp1", .data = &ast2700_mctp1_match_data}, ++ { } ++}; ++ ++static struct platform_driver aspeed_mctp_driver = { ++ .driver = { ++ .name = "aspeed-mctp", ++ .of_match_table = of_match_ptr(aspeed_mctp_match_table), ++ }, ++ .probe = aspeed_mctp_probe, ++ .remove = aspeed_mctp_remove, ++}; ++ ++static int __init aspeed_mctp_init(void) ++{ ++ packet_cache = ++ kmem_cache_create_usercopy("mctp-packet", ++ sizeof(struct mctp_pcie_packet), ++ 0, 0, 0, ++ sizeof(struct mctp_pcie_packet), ++ NULL); ++ if (!packet_cache) ++ return -ENOMEM; ++ ++ return platform_driver_register(&aspeed_mctp_driver); ++} ++ ++static void __exit aspeed_mctp_exit(void) ++{ ++ platform_driver_unregister(&aspeed_mctp_driver); ++ kmem_cache_destroy(packet_cache); ++} ++ ++module_init(aspeed_mctp_init) ++module_exit(aspeed_mctp_exit) ++ ++MODULE_DEVICE_TABLE(of, aspeed_mctp_match_table); ++MODULE_LICENSE("GPL"); ++MODULE_AUTHOR("Iwona Winiarska "); ++MODULE_DESCRIPTION("Aspeed MCTP driver"); +diff --git a/drivers/soc/aspeed/aspeed-p2a-ctrl.c b/drivers/soc/aspeed/aspeed-p2a-ctrl.c +--- a/drivers/soc/aspeed/aspeed-p2a-ctrl.c 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/soc/aspeed/aspeed-p2a-ctrl.c 2025-12-23 10:16:21.124032669 +0000 +@@ -431,7 +431,7 @@ + .of_match_table = aspeed_p2a_ctrl_match, + }, + .probe = aspeed_p2a_ctrl_probe, +- .remove_new = aspeed_p2a_ctrl_remove, ++ .remove = aspeed_p2a_ctrl_remove, + }; + + module_platform_driver(aspeed_p2a_ctrl_driver); +diff --git a/drivers/soc/aspeed/aspeed-pcie-mmbi.c b/drivers/soc/aspeed/aspeed-pcie-mmbi.c +--- a/drivers/soc/aspeed/aspeed-pcie-mmbi.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/soc/aspeed/aspeed-pcie-mmbi.c 2025-12-23 10:16:21.125032653 +0000 +@@ -0,0 +1,962 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++// Copyright (C) ASPEED Technology Inc. 
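++//
++// PCIe MMBI transport for AST2700: maps a reserved-memory region into a host-visible
++// PCIe BAR through the SCU/E2M registers, lays out the MMBI capability descriptor and
++// the B2H/H2B circular buffers in that window, and registers an MCTP network device
++// (mctpmmbiXY) so MCTP traffic can flow over the shared memory.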
++ ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++ ++#include "aspeed-pcie-mmbi.h" ++ ++/* AST2700 E2M */ ++#define ASPEED_E2M_EVENT 0x0D0 ++#define ASPEED_E2M_EVENT_SET 0x0D4 ++#define ASPEED_E2M_EVENT_CLR 0x0D8 ++#define ASPEED_E2M_EVENT_EN 0x0DC ++#define ASPEED_E2M_ADRMAP00 0x100 ++#define ASPEED_E2M_WIRQA0 0x180 ++#define ASPEED_E2M_WIRQV0 0x1C0 ++#define ASPEED_E2M_SPROT_SIDG0 0x210 ++#define ASPEED_E2M_SPROT_CTL0 0x280 ++#define ASPEED_E2M_SPROT_ADR0 0x2C0 ++ ++/* AST2700 SCU */ ++#define ASPEED_SCU_DECODE_DEV BIT(18) ++#define ASPEED_SCU_INT_EN BIT(23) ++struct aspeed_platform { ++ int (*mmbi_init)(struct platform_device *pdev); ++}; ++ ++struct aspeed_pcie_mmbi { ++ struct device *dev; ++ struct regmap *device; ++ struct regmap *e2m; ++ int irq; ++ const struct aspeed_platform *platform; ++ /* E2M index */ ++ int id; ++ int pid; ++ int scu_bar_offset; ++ int e2m_index; ++ int e2m_h2b_int; ++ ++ /* Memory Mapping */ ++ void __iomem *mem_virt; ++ dma_addr_t mem_phy; ++ phys_addr_t mem_size; ++ ++ struct aspeed_mmbi_channel chan; ++}; ++ ++static void mmbi_desc_init(struct aspeed_mmbi_channel *chan); ++ ++static u8 mmbi_get_bmc_up(struct aspeed_mmbi_channel *chan) ++{ ++ struct host_ros hros; ++ ++ memcpy_fromio(&hros, chan->hros_vmem, sizeof(hros)); ++ ++ return hros.b_up; ++} ++ ++static u8 mmbi_get_bmc_rdy(struct aspeed_mmbi_channel *chan) ++{ ++ struct host_ros hros; ++ ++ memcpy_fromio(&hros, chan->hros_vmem, sizeof(hros)); ++ ++ return hros.b_rdy; ++} ++ ++static u8 mmbi_get_bmc_rst(struct aspeed_mmbi_channel *chan) ++{ ++ struct host_ros hros; ++ ++ memcpy_fromio(&hros, chan->hros_vmem, sizeof(hros)); ++ ++ return hros.b_rst; ++} ++ ++static u8 mmbi_get_host_rst(struct aspeed_mmbi_channel *chan) ++{ ++ struct host_rws hrws; ++ ++ memcpy_fromio(&hrws, chan->hrws_vmem, sizeof(hrws)); ++ ++ return hrws.h_rst; ++} ++ ++static u8 mmbi_get_host_rdy(struct aspeed_mmbi_channel *chan) ++{ ++ struct host_rws hrws; ++ ++ memcpy_fromio(&hrws, chan->hrws_vmem, sizeof(hrws)); ++ ++ return hrws.h_rdy; ++} ++ ++static u8 mmbi_get_host_up(struct aspeed_mmbi_channel *chan) ++{ ++ struct host_rws hrws; ++ ++ memcpy_fromio(&hrws, chan->hrws_vmem, sizeof(hrws)); ++ ++ return hrws.h_up; ++} ++ ++static void mmbi_set_bmc_rst(struct aspeed_mmbi_channel *chan, bool set) ++{ ++ struct host_ros hros; ++ ++ memcpy_fromio(&hros, chan->hros_vmem, sizeof(hros)); ++ hros.b_rst = set; ++ memcpy_toio(chan->hros_vmem, &hros, sizeof(hros)); ++} ++ ++static void mmbi_set_bmc_rdy(struct aspeed_mmbi_channel *chan, bool set) ++{ ++ struct host_ros hros; ++ ++ memcpy_fromio(&hros, chan->hros_vmem, sizeof(hros)); ++ hros.b_rdy = set; ++ memcpy_toio(chan->hros_vmem, &hros, sizeof(hros)); ++} ++ ++static void mmbi_set_bmc_up(struct aspeed_mmbi_channel *chan, bool set) ++{ ++ struct host_ros hros; ++ ++ memcpy_fromio(&hros, chan->hros_vmem, sizeof(hros)); ++ hros.b_up = set; ++ memcpy_toio(chan->hros_vmem, &hros, sizeof(hros)); ++} ++ ++static void raise_b2h_interrupt(struct aspeed_mmbi_channel *chan) ++{ ++ if (!chan->host_int_en) ++ return; ++ ++ regmap_write(chan->mmbi->e2m, ASPEED_E2M_EVENT_SET, BIT(chan->mmbi->e2m_h2b_int)); ++} ++ ++static void mmbi_clear_hros(struct aspeed_mmbi_channel *chan) ++{ ++ memset_io(chan->hros_vmem, 0, sizeof(struct host_ros)); ++} ++ ++static void mmbi_clear_hrws(struct 
aspeed_mmbi_channel *chan) ++{ ++ memset_io(chan->hrws_vmem, 0, sizeof(struct host_rws)); ++} ++ ++static void get_b2h_avail_buf_len(struct aspeed_mmbi_channel *chan, ssize_t *avail_buf_len) ++{ ++ struct device *dev = chan->dev; ++ u32 b2h_rp, b2h_wp; ++ ++ b2h_rp = GET_B2H_READ_POINTER(chan); ++ b2h_wp = GET_B2H_WRITE_POINTER(chan); ++ dev_dbg(dev, "MMBI B2H - b2h_rp: 0x%0x, b2h_wp: 0x%0x\n", b2h_rp, b2h_wp); ++ ++ if (b2h_wp >= b2h_rp) ++ *avail_buf_len = chan->b2h_cb_size - b2h_wp + b2h_rp; ++ else ++ *avail_buf_len = b2h_rp - b2h_wp; ++} ++ ++static u8 mmbi_get_state(struct aspeed_mmbi_channel *chan) ++{ ++ u8 state = 0; ++ ++ state = mmbi_get_bmc_up(chan) << 3; ++ state |= mmbi_get_bmc_rst(chan) << 2; ++ state |= mmbi_get_host_up(chan) << 1; ++ state |= mmbi_get_host_rst(chan); ++ ++ return state; ++} ++ ++static int get_mmbi_header(struct aspeed_mmbi_channel *chan, u32 *data_length, ++ u8 *type, u32 *unread_data_len, u8 *padding) ++{ ++ u32 h2b_wp, h2b_rp, b2h_wp, b2h_rp; ++ struct device *dev = chan->dev; ++ struct mmbi_header header; ++ ++ h2b_wp = GET_H2B_WRITE_POINTER(chan); ++ h2b_rp = GET_H2B_READ_POINTER(chan); ++ b2h_wp = GET_B2H_WRITE_POINTER(chan); ++ b2h_rp = GET_B2H_READ_POINTER(chan); ++ dev_dbg(dev, "MMBI HRWS - h2b_wp: 0x%0x, b2h_rp: 0x%0x\n", h2b_wp, ++ b2h_rp); ++ dev_dbg(dev, "MMBI HROS - b2h_wp: 0x%0x, h2b_rp: 0x%0x\n", b2h_wp, ++ h2b_rp); ++ ++ if (h2b_wp >= h2b_rp) ++ *unread_data_len = h2b_wp - h2b_rp; ++ else ++ *unread_data_len = chan->h2b_cb_size - h2b_rp + h2b_wp; ++ ++ if (*unread_data_len < sizeof(struct mmbi_header)) { ++ dev_dbg(dev, "No data to read(%d - %d)\n", h2b_wp, h2b_rp); ++ return -EAGAIN; ++ } ++ ++ dev_dbg(dev, "READ MMBI header from: 0x%lx\n", ++ (ssize_t)(chan->h2b_cb_vmem + h2b_rp)); ++ ++ /* Extract MMBI protocol - protocol type and length */ ++ if ((h2b_rp + sizeof(header)) <= chan->h2b_cb_size) { ++ memcpy_fromio(&header, chan->h2b_cb_vmem + h2b_rp, ++ sizeof(header)); ++ } else { ++ ssize_t chunk_len = chan->h2b_cb_size - h2b_rp; ++ ++ memcpy_fromio(&header, chan->h2b_cb_vmem + h2b_rp, chunk_len); ++ memcpy_fromio(((u8 *)&header) + chunk_len, chan->h2b_cb_vmem, ++ sizeof(header) - chunk_len); ++ } ++ ++ *data_length = (header.pkt_len << 2) - sizeof(header) - header.pkt_pad; ++ *padding = header.pkt_pad; ++ *type = header.pkt_type; ++ ++ return 0; ++} ++ ++static int mmbi_state_check(struct aspeed_mmbi_channel *chan) ++{ ++ enum mmbi_state current_state = mmbi_get_state(chan); ++ struct device *dev = chan->dev; ++ u32 req_data_len, unread_data_len; ++ u8 type, padding; ++ ++ switch (current_state) { ++ case INIT_MISMATCH: ++ dev_dbg(dev, "Get INIT_MISMATCH state from HOST"); ++ /* Reset MMBI data structure */ ++ mmbi_desc_init(chan); ++ /* Translate state to INIT_IN_PROGRESS */ ++ mmbi_clear_hros(chan); ++ mmbi_clear_hrws(chan); ++ /* Translate state to INIT_COMPLETED */ ++ mmbi_set_bmc_up(chan, 1); ++ ++ dev_dbg(dev, "Change state to INIT_COMPLETED to HOST"); ++ raise_b2h_interrupt(chan); ++ return 1; ++ case NORMAL_RUNTIME: ++ if (mmbi_get_bmc_rdy(chan)) ++ return 0; ++ dev_dbg(dev, "Get NORMAL_RUNTIME state from HOST"); ++ mmbi_set_bmc_rdy(chan, 1); ++ return 1; ++ case RESET_REQ_BY_HOST: ++ dev_dbg(dev, "Get RESET_REQ_BY_HOST state from HOST"); ++ /* Stop operation */ ++ mmbi_set_bmc_rdy(chan, 0); ++ /* Change state to RESET_ACKED */ ++ mmbi_set_bmc_rst(chan, 1); ++ raise_b2h_interrupt(chan); ++ /* Change state to TRANS_TO_INIT */ ++ mmbi_set_bmc_up(chan, 0); ++ /* Reset MMBI data structure */ ++ mmbi_desc_init(chan); ++ /* Translate 
state to INIT_IN_PROGRESS */ ++ mmbi_clear_hros(chan); ++ mmbi_clear_hrws(chan); ++ /* Translate state to INIT_COMPLETED */ ++ mmbi_set_bmc_up(chan, 1); ++ ++ dev_dbg(dev, "Change state to INIT_COMPLETED to HOST"); ++ raise_b2h_interrupt(chan); ++ return 1; ++ case RESET_ACKED: ++ /* Receive all packets from Host */ ++ while (get_mmbi_header(chan, &req_data_len, &type, &unread_data_len, &padding) == 0 && ++ mmbi_get_state(chan) == RESET_ACKED) ++ ; ++ /* Change state to TRANS_TO_INIT */ ++ mmbi_set_bmc_up(chan, 0); ++ /* Reset MMBI data structure */ ++ mmbi_desc_init(chan); ++ /* Translate state to INIT_IN_PROGRESS */ ++ mmbi_clear_hros(chan); ++ mmbi_clear_hrws(chan); ++ /* Translate state to INIT_COMPLETED */ ++ mmbi_set_bmc_up(chan, 1); ++ ++ dev_dbg(dev, "Change state to INIT_COMPLETED to HOST"); ++ raise_b2h_interrupt(chan); ++ default: ++ break; ++ } ++ ++ return 0; ++} ++ ++static void update_host_ros(struct aspeed_mmbi_channel *chan, unsigned int w_len, ++ unsigned int r_len) ++{ ++ struct device *dev = chan->dev; ++ struct host_ros hros; ++ u32 h2b_rp, b2h_wp; ++ ++ b2h_wp = GET_B2H_WRITE_POINTER(chan); ++ h2b_rp = GET_H2B_READ_POINTER(chan); ++ ++ /* Advance the B2H CB offset for next write */ ++ if ((b2h_wp + w_len) <= chan->b2h_cb_size) ++ b2h_wp += w_len; ++ else ++ b2h_wp = b2h_wp + w_len - chan->b2h_cb_size; ++ ++ /* Advance the H2B CB offset to where the BMC has read data */ ++ if ((h2b_rp + r_len) <= chan->h2b_cb_size) ++ h2b_rp += r_len; ++ else ++ h2b_rp = h2b_rp + r_len - chan->h2b_cb_size; ++ ++ memcpy_fromio(&hros, chan->hros_vmem, sizeof(hros)); ++ hros.b2h_wp = FIELD_GET(B2H_WRITE_POINTER_MASK, b2h_wp); ++ hros.h2b_rp = FIELD_GET(H2B_READ_POINTER_MASK, h2b_rp); ++ memcpy_toio(chan->hros_vmem, &hros, sizeof(hros)); ++ dev_dbg(dev, "Updating HROS - h2b_rp: 0x%0x, b2h_wp: 0x%0x\n", h2b_rp, b2h_wp); ++ ++ if (w_len != 0) ++ raise_b2h_interrupt(chan); ++} ++ ++static int aspeed_mmbi_write(struct aspeed_mmbi_channel *chan, char *buffer, size_t len, ++ protocol_type type) ++{ ++ struct device *dev = chan->dev; ++ struct mmbi_header header = { 0 }; ++ ssize_t avail_buf_len; ++ ssize_t total_len; ++ ssize_t wt_offset; ++ ssize_t chunk_len; ++ ssize_t end_offset; ++ u8 padding = 0; ++ ++ /* If HOST READY bit is not set, just discard the write. 
*/ ++ if (!GET_HOST_READY_BIT(chan)) { ++ dev_dbg(dev, "Host not ready, discarding request...\n"); ++ return -EAGAIN; ++ } ++ ++ get_b2h_avail_buf_len(chan, &avail_buf_len); ++ ++ dev_dbg(dev, "B2H buffer empty space: %ld\n", avail_buf_len); ++ ++ /* Header size */ ++ total_len = len + 4; ++ ++ padding = total_len & 0x3; ++ if (padding) ++ padding = 4 - padding; ++ total_len += padding; ++ ++ /* Empty space should be more than write request data size */ ++ if (avail_buf_len <= sizeof(header) || (total_len > (avail_buf_len - sizeof(header)))) ++ return -ENOSPC; ++ ++ /* Fill multi-protocol header */ ++ header.pkt_type = type; ++ header.pkt_len = total_len >> 2; ++ header.pkt_pad = padding; ++ ++ wt_offset = GET_B2H_WRITE_POINTER(chan); ++ end_offset = chan->b2h_cb_size; ++ ++ /* Copy Header */ ++ if ((end_offset - wt_offset) >= sizeof(header)) { ++ memcpy_toio(chan->b2h_cb_vmem + wt_offset, &header, sizeof(header)); ++ wt_offset += sizeof(header); ++ } else { ++ chunk_len = end_offset - wt_offset; ++ memcpy_toio(chan->b2h_cb_vmem + wt_offset, &header, chunk_len); ++ memcpy_toio(chan->b2h_cb_vmem, (u8 *)&header + chunk_len, (sizeof(header) - chunk_len)); ++ wt_offset = (sizeof(header) - chunk_len); ++ } ++ ++ /* Write the data */ ++ if ((end_offset - wt_offset) >= len) { ++ memcpy_toio(&chan->b2h_cb_vmem[wt_offset], buffer, len); ++ wt_offset += len; ++ } else { ++ chunk_len = end_offset - wt_offset; ++ dev_dbg(dev, "Write data chunk_len: %ld\n", chunk_len); ++ memcpy_toio(&chan->b2h_cb_vmem[wt_offset], buffer, chunk_len); ++ ++ wt_offset = 0; ++ memcpy_toio(&chan->b2h_cb_vmem[wt_offset], buffer + chunk_len, len - chunk_len); ++ wt_offset += len - chunk_len; ++ } ++ ++ update_host_ros(chan, total_len, 0); ++ ++ return 0; ++} ++ ++static void aspeed_mmbi_read(struct aspeed_mmbi_channel *chan, char *buffer, size_t len, u8 padding) ++{ ++ struct device *dev = chan->dev; ++ ssize_t rd_offset; ++ u32 h2b_rp; ++ ++ h2b_rp = GET_H2B_READ_POINTER(chan); ++ if ((h2b_rp + sizeof(struct mmbi_header)) <= chan->h2b_cb_size) ++ rd_offset = h2b_rp + sizeof(struct mmbi_header); ++ else ++ rd_offset = h2b_rp + sizeof(struct mmbi_header) - chan->h2b_cb_size; ++ ++ /* Extract data and copy to user space application */ ++ dev_dbg(dev, "READ MMBI Data from: 0x%0lx and length: %ld\n", ++ (ssize_t)(chan->h2b_cb_vmem + rd_offset), len); ++ ++ if ((chan->h2b_cb_size - rd_offset) >= len) { ++ memcpy_fromio(buffer, chan->h2b_cb_vmem + rd_offset, len); ++ rd_offset += len; ++ } else { ++ ssize_t chunk_len; ++ ++ chunk_len = chan->h2b_cb_size - rd_offset; ++ dev_dbg(dev, "Read data chunk_len: %ld\n", chunk_len); ++ memcpy_fromio(buffer, chan->h2b_cb_vmem + rd_offset, chunk_len); ++ ++ rd_offset = 0; ++ memcpy_fromio(buffer + chunk_len, chan->h2b_cb_vmem + rd_offset, len - chunk_len); ++ } ++ ++ update_host_ros(chan, 0, len + sizeof(struct mmbi_header) + padding); ++} ++ ++static void mctp_mmbi_rx(struct aspeed_mmbi_channel *chan) ++{ ++ struct net_device *ndev = chan->ndev; ++ struct sk_buff *skb; ++ struct mctp_skb_cb *cb; ++ u32 req_data_len, unread_data_len; ++ u8 type, padding; ++ int status; ++ ++ if (get_mmbi_header(chan, &req_data_len, &type, &unread_data_len, &padding) != 0) ++ return; ++ ++ dev_dbg(chan->dev, "%s: Length: 0x%0x, Protocol Type: %d, Unread data: %d\n", __func__, ++ req_data_len, type, unread_data_len); ++ ++ skb = netdev_alloc_skb(ndev, req_data_len); ++ if (!skb) { ++ ndev->stats.rx_dropped++; ++ update_host_ros(chan, 0, req_data_len + sizeof(struct mmbi_header)); ++ return; ++ } ++ ++ skb->protocol = 
htons(ETH_P_MCTP); ++ aspeed_mmbi_read(chan, skb_put(skb, req_data_len), req_data_len, padding); ++ skb_reset_network_header(skb); ++ ++ cb = __mctp_cb(skb); ++ cb->halen = 0; ++ ++ status = netif_rx(skb); ++ if (status == NET_RX_SUCCESS) { ++ ndev->stats.rx_packets++; ++ ndev->stats.rx_bytes += req_data_len; ++ } else { ++ ndev->stats.rx_dropped++; ++ } ++} ++ ++static netdev_tx_t mctp_mmbi_tx(struct sk_buff *skb, struct net_device *ndev) ++{ ++ struct aspeed_mmbi_mctp *mctp = netdev_priv(ndev); ++ int ret; ++ ++ if (!mmbi_get_host_rdy(&mctp->mmbi->chan) || skb->len > MCTP_MMBI_MTU_MAX) { ++ ndev->stats.tx_dropped++; ++ goto out; ++ } ++ ++ ret = aspeed_mmbi_write(&mctp->mmbi->chan, skb->data, skb->len, MMBI_PROTOCOL_MCTP); ++ if (ret) { ++ netif_stop_queue(ndev); ++ return NETDEV_TX_BUSY; ++ } ++ ++ ndev->stats.tx_packets++; ++ ndev->stats.tx_bytes += skb->len; ++out: ++ kfree_skb(skb); ++ return NETDEV_TX_OK; ++} ++ ++static const struct net_device_ops mctp_mmbi_netdev_ops = { ++ .ndo_start_xmit = mctp_mmbi_tx, ++}; ++ ++static void aspeed_mctp_mmbi_setup(struct net_device *ndev) ++{ ++ ndev->type = ARPHRD_MCTP; ++ ++ /* we limit at the fixed MTU, which is also the MCTP-standard ++ * baseline MTU, so is also our minimum ++ */ ++ ndev->mtu = MCTP_MMBI_MTU; ++ ndev->max_mtu = MCTP_MMBI_MTU_MAX; ++ ndev->min_mtu = MCTP_MMBI_MTU_MIN; ++ ++ ndev->hard_header_len = 0; ++ ndev->addr_len = 0; ++ ndev->tx_queue_len = DEFAULT_TX_QUEUE_LEN; ++ ndev->flags = IFF_NOARP; ++ ndev->netdev_ops = &mctp_mmbi_netdev_ops; ++ ndev->needs_free_netdev = true; ++} ++ ++static int aspeed_mmbi_mctp_init(struct aspeed_mmbi_channel *chan) ++{ ++ struct aspeed_mmbi_mctp *mctp; ++ struct net_device *ndev; ++ char name[32]; ++ int ret; ++ ++ snprintf(name, sizeof(name), "mctpmmbi%d%d", chan->mmbi->id, chan->mmbi->e2m_index); ++ ndev = alloc_netdev(sizeof(*mctp), name, NET_NAME_ENUM, aspeed_mctp_mmbi_setup); ++ if (!ndev) ++ return -ENOMEM; ++ ++ mctp = netdev_priv(ndev); ++ mctp->ndev = ndev; ++ mctp->mmbi = chan->mmbi; ++ ++ chan->ndev = ndev; ++ ++ ret = register_netdev(ndev); ++ if (ret) ++ goto free_netdev; ++ ++ return 0; ++ ++free_netdev: ++ free_netdev(ndev); ++ ++ return ret; ++} ++ ++static void aspeed_mmbi_work_func(struct work_struct *workq) ++{ ++ struct aspeed_mmbi_channel *chan = container_of(workq, struct aspeed_mmbi_channel, work); ++ u32 weight = 256, req_data_len, unread_data_len; ++ u8 type, padding; ++ int i; ++ ++ for (i = 0; i < weight; i++) { ++ if (get_mmbi_header(chan, &req_data_len, &type, &unread_data_len, &padding) != 0) ++ return; ++ ++ dev_dbg(chan->dev, "%s: Length: 0x%0x, Protocol Type: %d\n", ++ __func__, req_data_len, type); ++ ++ if (type == MMBI_PROTOCOL_MCTP) ++ mctp_mmbi_rx(chan); ++ else ++ /* Discard data and advance the hrws */ ++ update_host_ros(chan, 0, req_data_len + sizeof(struct mmbi_header) + padding); ++ ++ raise_b2h_interrupt(chan); ++ } ++ ++ if (get_mmbi_header(chan, &req_data_len, &type, &unread_data_len, &padding) != 0) ++ queue_work(system_unbound_wq, &chan->work); ++} ++ ++static irqreturn_t aspeed_pcie_mmbi_isr(int irq, void *dev_id) ++{ ++ struct aspeed_pcie_mmbi *mmbi = dev_id; ++ struct aspeed_mmbi_channel *chan = &mmbi->chan; ++ ssize_t avail_buf_len; ++ ++ get_b2h_avail_buf_len(chan, &avail_buf_len); ++ if (avail_buf_len > MCTP_MMBI_MTU_MAX) { ++ if (netif_queue_stopped(chan->ndev)) { ++ dev_dbg(chan->dev, "Wake up mctp net device\n"); ++ netif_wake_queue(chan->ndev); ++ } ++ } ++ ++ if (mmbi_state_check(chan)) ++ return IRQ_HANDLED; ++ ++ 
queue_work(system_unbound_wq, &chan->work); ++ ++ return IRQ_HANDLED; ++} ++ ++static void mmbi_desc_init(struct aspeed_mmbi_channel *chan) ++{ ++ struct mmbi_cap_desc desc; ++ ++ memset(&desc, 0, sizeof(struct mmbi_cap_desc)); ++ ++ desc.version = 1; ++ /* This MMBI interface is intended for OS use */ ++ desc.os_use = 1; ++ desc.b2h_ba = (chan->b2h_cb_vmem - chan->desc_vmem) >> 3; ++ desc.h2b_ba = (chan->h2b_cb_vmem - chan->desc_vmem) >> 3; ++ /* Make sure the buffer size is 4-byte aligned */ ++ desc.b2h_l = chan->b2h_cb_size & ~0x3; ++ desc.h2b_l = chan->h2b_cb_size & ~0x3; ++ /* Variable Packet Size Circular Buffers (VPSCB) v1 */ ++ desc.buffer_type = 0x01; ++ desc.bt_desc.h_ros_p = (chan->hros_vmem - chan->desc_vmem) >> 3; ++ desc.bt_desc.h_rws_p = (chan->hrws_vmem - chan->desc_vmem) >> 3; ++ /* PCIe Interrupt */ ++ desc.bt_desc.h_int_t = 0x01; ++ desc.bt_desc.h_int_l = chan->host_int_location; ++ desc.bt_desc.h_int_v = 0; /* Skip for PCIe Interrupt */ ++ desc.bt_desc.bmc_int_t = 0x01; /* relative memory space address */ ++ desc.bt_desc.bmc_int_l = chan->bmc_int_location; ++ desc.bt_desc.bmc_int_v = chan->bmc_int_value; ++ ++ /* Per MMBI protocol spec, set it to "#MMBI$" */ ++ memcpy(desc.signature, MMBI_SIGNATURE, sizeof(desc.signature)); ++ ++ memcpy_toio(chan->desc_vmem, &desc, sizeof(desc)); ++} ++ ++static int aspeed_pcie_mmbi_init(struct aspeed_pcie_mmbi *mmbi) ++{ ++ struct aspeed_mmbi_channel *chan = &mmbi->chan; ++ struct device *dev = chan->dev; ++ u32 b2h_size = mmbi->mem_size >> 1; ++ u32 h2b_size = mmbi->mem_size >> 1; ++ u8 *h2b_vaddr, *b2h_vaddr; ++ int ret; ++ ++ b2h_vaddr = mmbi->mem_virt; ++ h2b_vaddr = b2h_vaddr + b2h_size; ++ ++ chan->dev = dev; ++ chan->desc_vmem = b2h_vaddr; ++ chan->hros_vmem = b2h_vaddr + sizeof(struct mmbi_cap_desc); ++ chan->b2h_cb_vmem = b2h_vaddr + sizeof(struct mmbi_cap_desc) + sizeof(struct host_ros); ++ chan->b2h_cb_size = b2h_size - sizeof(struct mmbi_cap_desc) - sizeof(struct host_ros); ++ ++ chan->hrws_vmem = h2b_vaddr; ++ chan->h2b_cb_vmem = h2b_vaddr + sizeof(struct host_rws); ++ chan->h2b_cb_size = h2b_size - sizeof(struct host_rws); ++ ++ dev_dbg(dev, "B2H mapped addr - desc: 0x%0lx, hros: 0x%0lx, b2h_cb: 0x%0lx\n", ++ (size_t)chan->desc_vmem, (size_t)chan->hros_vmem, (size_t)chan->b2h_cb_vmem); ++ dev_dbg(dev, "H2B mapped addr - hrws: 0x%0lx, h2b_cb: 0x%0lx\n", (size_t)chan->hrws_vmem, ++ (size_t)chan->h2b_cb_vmem); ++ ++ dev_dbg(dev, "B2H buffer size: 0x%0lx\n", (size_t)chan->b2h_cb_size); ++ dev_dbg(dev, "H2B buffer size: 0x%0lx\n", (size_t)chan->h2b_cb_size); ++ ++ /* Initialize the MMBI channel descriptor */ ++ mmbi_desc_init(chan); ++ ++ /* Clear HRWS & HROS */ ++ mmbi_clear_hros(chan); ++ mmbi_clear_hrws(chan); ++ ++ /* Initialize MCTP function */ ++ ret = aspeed_mmbi_mctp_init(chan); ++ if (ret) { ++ dev_err(dev, "Unable to init mctp\n"); ++ return ret; ++ } ++ ++ /* Set BMC UP bit */ ++ mmbi_set_bmc_up(chan, 1); ++ ++ return 0; ++} ++ ++/* ++ * AST2700 PCIe MMBI (SCU & E2M) ++ * SoC | 0 | 1 | ++ * PCI class | MFD (0xFF_00_00) | MMBI (0x0C_0C_00) | ++ * Node | 0 1 | 0 | ++ * PID | 3 4 5 6 11 12 13 14 | 2 3 4 5 6 7 | ++ * E2M index | 0 1 2 3 4 5 6 7 | 0 1 2 3 4 5 | ++ * BAR index | 2 3 4 5 2 3 4 5 | 0 1 2 3 4 5 | ++ * SCU BAR | 3c 4c 5c 6c 3c 4c 5c 6c | 1c 50 3c 4c 5c 6c | ++ * E2M H2B Int | 0 1 2 3 0 1 2 3 | 0 1 2 3 4 5 | (bit) ++ */ ++static int aspeed_ast2700_pcie_mmbi_init(struct platform_device *pdev) ++{ ++ struct aspeed_pcie_mmbi *mmbi = platform_get_drvdata(pdev); ++ struct aspeed_mmbi_channel *chan = &mmbi->chan;
++ struct device *dev = &pdev->dev; ++ u32 value, sprot_size, e2m_index, pid; ++ struct resource res; ++ int ret, i; ++ ++ /* Get register map */ ++ mmbi->e2m = syscon_node_to_regmap(dev->of_node->parent); ++ if (IS_ERR(mmbi->e2m)) { ++ dev_err(dev, "failed to find e2m regmap\n"); ++ return PTR_ERR(mmbi->e2m); ++ } ++ if (of_address_to_resource(dev->of_node->parent, 0, &res)) { ++ dev_err(dev, "Failed to get e2m resource\n"); ++ return -EINVAL; ++ } ++ if (res.start == 0x14c1d000) ++ mmbi->id = 2; ++ else if (res.start == 0x12c22000) ++ mmbi->id = 1; ++ else ++ mmbi->id = 0; /* 0x12c21000 */ ++ ++ mmbi->device = syscon_regmap_lookup_by_phandle(dev->of_node->parent, "aspeed,device"); ++ if (IS_ERR(mmbi->device)) { ++ dev_err(dev, "failed to find device regmap\n"); ++ return PTR_ERR(mmbi->device); ++ } ++ ++ ret = of_property_read_u32(dev->of_node, "index", &mmbi->e2m_index); ++ if (ret < 0) { ++ dev_err(dev, "cannot get mmbi index value\n"); ++ return ret; ++ } ++ ++ ret = of_property_read_u32(dev->of_node, "pid", &mmbi->pid); ++ if (ret < 0) { ++ dev_err(dev, "cannot get mmbi pid value\n"); ++ return ret; ++ } ++ ++ ret = of_property_read_u32(dev->of_node, "bar", &mmbi->scu_bar_offset); ++ if (ret < 0) { ++ dev_err(dev, "cannot get mmbi bar value\n"); ++ return ret; ++ } ++ ++ e2m_index = mmbi->e2m_index; ++ pid = mmbi->pid; ++ mmbi->e2m_h2b_int += mmbi->e2m_index; ++ if (mmbi->id < 2) { ++ /* PCIe device class, sub-class, protocol and revision */ ++ regmap_write(mmbi->device, 0x18, 0xFF000027); ++ } else { ++ regmap_write(mmbi->device, 0x18, 0x0C0C0027); ++ regmap_write(mmbi->device, 0x78, ASPEED_SCU_INT_EN | ASPEED_SCU_DECODE_DEV); ++ } ++ ++ /* MSI */ ++ regmap_update_bits(mmbi->device, 0x74, GENMASK(7, 4), BIT(7) | (5 << 4)); ++ ++ regmap_update_bits(mmbi->device, 0x70, BIT(25) | BIT(17) | BIT(9) | BIT(1), ++ BIT(25) | BIT(17) | BIT(9) | BIT(1)); ++ ++ /* Calculate the BAR Size */ ++ for (i = 1; i < 16; i++) { ++ /* bar size check for 4k align */ ++ if ((mmbi->mem_size / 4096) == (1 << (i - 1))) ++ break; ++ } ++ if (i == 16) { ++ i = 0; ++ dev_warn(dev, "Bar size not align for 4K : %dK\n", (u32)mmbi->mem_size / 1024); ++ } ++ regmap_write(mmbi->device, mmbi->scu_bar_offset, (mmbi->mem_phy >> 4) | i); ++ regmap_write(mmbi->e2m, ASPEED_E2M_ADRMAP00 + (4 * pid), (mmbi->mem_phy >> 4) | i); ++ ++ /* BMC Interrupt */ ++ if (chan->bmc_int_en) { ++ value = mmbi->mem_phy + chan->bmc_int_location; ++ regmap_write(mmbi->e2m, ASPEED_E2M_WIRQA0 + (4 * e2m_index), value); ++ value = (BIT(16) << pid) | chan->bmc_int_value; ++ regmap_write(mmbi->e2m, ASPEED_E2M_WIRQV0 + (4 * e2m_index), value); ++ } ++ ++ /* HOST Interrupt: MSI */ ++ regmap_read(mmbi->e2m, ASPEED_E2M_EVENT_EN, &value); ++ value |= BIT(mmbi->e2m_h2b_int); ++ regmap_write(mmbi->e2m, ASPEED_E2M_EVENT_EN, value); ++ ++ /* B2H Write Protect */ ++ sprot_size = (mmbi->mem_size / 2) / SZ_1M; ++ value = (sprot_size << 16) | (mmbi->mem_phy >> 20); ++ regmap_write(mmbi->e2m, ASPEED_E2M_SPROT_ADR0 + (4 * e2m_index), value); ++ /* Enable read & disable write */ ++ value = 1 << (8 + e2m_index); ++ regmap_write(mmbi->e2m, ASPEED_E2M_SPROT_CTL0 + (4 * e2m_index), value); ++ /* Set PID */ ++ regmap_read(mmbi->e2m, ASPEED_E2M_SPROT_SIDG0 + (4 * (e2m_index / 4)), &value); ++ value |= pid << (8 * (e2m_index % 4)); ++ regmap_write(mmbi->e2m, ASPEED_E2M_SPROT_SIDG0 + (4 * (e2m_index / 4)), value); ++ ++ mmbi->chan.dev = dev; ++ mmbi->chan.mmbi = mmbi; ++ ret = aspeed_pcie_mmbi_init(mmbi); ++ if (ret < 0) { ++ dev_err(dev, "Initialize MMBI device 
failed.\n"); ++ return ret; ++ } ++ ++ INIT_WORK(&chan->work, aspeed_mmbi_work_func); ++ ++ return 0; ++} ++ ++struct aspeed_platform ast2700_platform = { ++ .mmbi_init = aspeed_ast2700_pcie_mmbi_init, ++}; ++ ++static const struct of_device_id aspeed_pcie_mmbi_of_matches[] = { ++ { .compatible = "aspeed,ast2700-pcie-mmbi", .data = &ast2700_platform }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(of, aspeed_pcie_mmbi_of_matches); ++ ++static int aspeed_pcie_mmbi_probe(struct platform_device *pdev) ++{ ++ struct aspeed_pcie_mmbi *mmbi; ++ struct aspeed_mmbi_channel *chan; ++ struct device *dev = &pdev->dev; ++ struct resource res; ++ struct device_node *np; ++ const void *md; ++ int ret = 0; ++ ++ md = of_device_get_match_data(dev); ++ if (!md) ++ return -ENODEV; ++ ++ mmbi = devm_kzalloc(&pdev->dev, sizeof(struct aspeed_pcie_mmbi), GFP_KERNEL); ++ if (!mmbi) ++ return -ENOMEM; ++ dev_set_drvdata(dev, mmbi); ++ ++ mmbi->dev = dev; ++ mmbi->platform = md; ++ ++ /* Get MMBI memory size */ ++ np = of_parse_phandle(dev->of_node, "memory-region", 0); ++ if (!np || of_address_to_resource(np, 0, &res)) { ++ dev_err(dev, "Failed to find memory-region.\n"); ++ ret = -ENOMEM; ++ goto out_region; ++ } ++ ++ of_node_put(np); ++ ++ mmbi->mem_phy = res.start; ++ mmbi->mem_size = resource_size(&res); ++ mmbi->mem_virt = ioremap(mmbi->mem_phy, mmbi->mem_size); ++ if (!mmbi->mem_virt) { ++ dev_err(dev, "cannot map mmbi memory region\n"); ++ ret = -ENOMEM; ++ goto out_region; ++ } ++ ++ /* Get IRQ */ ++ mmbi->irq = platform_get_irq(pdev, 0); ++ if (mmbi->irq < 0) { ++ dev_err(&pdev->dev, "platform get of irq[=%d] failed!\n", mmbi->irq); ++ goto out_unmap; ++ } ++ ret = devm_request_irq(&pdev->dev, mmbi->irq, aspeed_pcie_mmbi_isr, 0, dev_name(&pdev->dev), ++ mmbi); ++ if (ret) { ++ dev_err(dev, "pcie mmbi unable to get IRQ"); ++ goto out_unmap; ++ } ++ ++ chan = &mmbi->chan; ++ memset(chan, 0, sizeof(struct aspeed_mmbi_channel)); ++ ++ chan->bmc_int_en = true; ++ /* H2B Interrupt */ ++ ret = of_property_read_u8(dev->of_node, "bmc-int-value", &chan->bmc_int_value); ++ if (ret) { ++ dev_err(dev, "cannot get valid MMBI H2B interrupt value\n"); ++ chan->bmc_int_en = false; ++ } ++ ret = of_property_read_u32(dev->of_node, "bmc-int-location", &chan->bmc_int_location); ++ if (ret) { ++ dev_err(dev, "cannot get valid MMBI H2B interrupt location\n"); ++ chan->bmc_int_en = false; ++ } ++ /* B2H Interrupt */ ++ chan->host_int_en = true; ++ ret = of_property_read_u8(dev->of_node, "msi", &chan->host_int_value); ++ if (ret) { ++ dev_err(dev, "cannot get valid MMBI B2H interrupt location\n"); ++ chan->host_int_en = false; ++ } ++ ++ ret = mmbi->platform->mmbi_init(pdev); ++ if (ret) { ++ dev_err(dev, "Initialize pcie mmbi failed\n"); ++ goto out_irq; ++ } ++ ++ dev_info(dev, "ASPEED PCIe MMBI Dev %d: driver successfully loaded.\n", mmbi->id); ++ ++ return 0; ++out_irq: ++ devm_free_irq(dev, mmbi->irq, mmbi); ++out_unmap: ++ iounmap(mmbi->mem_virt); ++out_region: ++ devm_kfree(dev, mmbi); ++ dev_warn(dev, "aspeed bmc device: driver init failed (ret=%d)!\n", ret); ++ return ret; ++} ++ ++static void aspeed_pcie_mmbi_remove(struct platform_device *pdev) ++{ ++ struct aspeed_pcie_mmbi *mmbi = platform_get_drvdata(pdev); ++ ++ cancel_work_sync(&mmbi->chan.work); ++ unregister_netdev(mmbi->chan.ndev); ++ devm_free_irq(&pdev->dev, mmbi->irq, mmbi); ++ iounmap(mmbi->mem_virt); ++ devm_kfree(&pdev->dev, mmbi); ++} ++ ++static struct platform_driver aspeed_pcie_mmbi_driver = { ++ .probe = aspeed_pcie_mmbi_probe, ++ .remove = 
aspeed_pcie_mmbi_remove, ++ .driver = { ++ .name = KBUILD_MODNAME, ++ .of_match_table = aspeed_pcie_mmbi_of_matches, ++ }, ++}; ++ ++module_platform_driver(aspeed_pcie_mmbi_driver); ++ ++MODULE_AUTHOR("Jacky Chou "); ++MODULE_DESCRIPTION("ASPEED PCI-E MMBI Driver"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/soc/aspeed/aspeed-pcie-mmbi.h b/drivers/soc/aspeed/aspeed-pcie-mmbi.h +--- a/drivers/soc/aspeed/aspeed-pcie-mmbi.h 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/soc/aspeed/aspeed-pcie-mmbi.h 2025-12-23 10:16:21.125032653 +0000 +@@ -0,0 +1,141 @@ ++/* SPDX-License-Identifier: GPL-2.0+ */ ++/* ++ * Copyright 2024 Aspeed Technology Inc. ++ */ ++#ifndef __ASPEED_PCIE_MMBI_H__ ++#define __ASPEED_PCIE_MMBI_H__ ++ ++#define MMBI_SIGNATURE "#MMBI$" ++ ++//This definitions are as per MMBI specification. ++#define MMBI_PROTOCOL_IPMI 1 ++#define MMBI_PROTOCOL_SEAMLESS 2 ++#define MMBI_PROTOCOL_RAS_OFFLOAD 3 ++#define MMBI_PROTOCOL_MCTP 4 ++#define MMBI_PROTOCOL_NODE_MANAGER 5 ++ ++#define MMBI_HRWS0(x) readl((x)->hrws_vmem) ++#define MMBI_HRWS1(x) readl((x)->hrws_vmem + 4) ++#define MMBI_HROS0(x) readl((x)->hros_vmem) ++#define MMBI_HROS1(x) readl((x)->hros_vmem + 4) ++ ++#define H2B_WRITE_POINTER_MASK GENMASK(31, 2) ++#define H2B_READ_POINTER_MASK GENMASK(31, 2) ++#define B2H_WRITE_POINTER_MASK GENMASK(31, 2) ++#define B2H_READ_POINTER_MASK GENMASK(31, 2) ++ ++#define GET_H2B_WRITE_POINTER(x) (MMBI_HRWS0(x) & H2B_WRITE_POINTER_MASK) ++#define GET_H2B_READ_POINTER(x) (MMBI_HROS1(x) & H2B_READ_POINTER_MASK) ++#define GET_B2H_WRITE_POINTER(x) (MMBI_HROS0(x) & B2H_WRITE_POINTER_MASK) ++#define GET_B2H_READ_POINTER(x) (MMBI_HRWS1(x) & B2H_READ_POINTER_MASK) ++ ++#define GET_HOST_READY_BIT(x) (MMBI_HRWS1(x) & 0x01) ++#define GET_BMC_READY_BIT(x) (MMBI_HROS1(x) & 0x01) ++ ++typedef u8 protocol_type; ++ ++enum mmbi_state { /* B_U B_R H_U H_R */ ++ INIT_IN_PROGRESS = 0x00, /* 0 0 0 0 */ ++ INIT_COMPLETED = 0x08, /* 1 0 0 0 */ ++ NORMAL_RUNTIME = 0x0A, /* 1 0 1 0 */ ++ RESET_REQ_BY_BMC = 0x0E, /* 1 1 1 0 */ ++ RESET_REQ_BY_HOST = 0x0B, /* 1 0 1 1 */ ++ RESET_ACKED = 0x0F, /* 1 1 1 1 */ ++ TRANS_TO_INIT = 0x07, /* 0 1 1 1 */ ++ INIT_MISMATCH = 0x09, /* 1 0 0 1 */ ++ POWER_UP_OR_ERROR = 0x80000000, ++}; ++ ++struct mmbi_header { ++ u32 pkt_pad : 2; ++ u32 pkt_len : 22; ++ u32 pkt_type : 4; ++ u32 reserved : 4; ++}; ++ ++struct host_ros { ++ u32 b_rst : 1; /* BMC Reset Request */ ++ u32 b_up : 1; /* BMC Interface Up */ ++ u32 b2h_wp : 30; /* B2H Write Pointer */ ++ u32 b_rdy : 1; /* BMC Ready */ ++ u32 reserved1 : 1; ++ u32 h2b_rp : 30; /* H2B Read Pointer */ ++}; ++ ++struct host_rws { ++ u32 h_rst : 1; /* Host Reset Request */ ++ u32 h_up : 1; /* Host Interface Up */ ++ u32 h2b_wp : 30; /* H2B Write Pointer */ ++ u32 h_rdy : 1; /* Host Ready */ ++ u32 reserved1 : 1; ++ u32 b2h_rp : 30; /* B2H Read Pointer */ ++}; ++ ++struct buffer_type_desc { ++ u32 h_ros_p; /* Host Read-Only Structure Pointer */ ++ u32 h_rws_p; /* Host Read-Write Structure Pointer */ ++ u8 h_int_t; /* Host Interrupt Type */ ++ u8 h_int_l; /* Host Interrupt Location */ ++ u8 reserved1[3]; ++ u8 h_int_v; /* Host Interrupt Value */ ++ u8 bmc_int_t; /* BMC Interrupt Type */ ++ u32 bmc_int_l; /* BMC Interrupt Location */ ++ u8 reserved2[4]; ++ u8 bmc_int_v; /* BMC Interrupt Value */ ++} __packed; ++ ++struct mmbi_cap_desc { ++ u8 signature[6]; ++ u8 version; ++ u8 os_use; ++ u32 b2h_ba; /* B2H Buffer Base Address */ ++ u32 h2b_ba; /* H2B Buffer Base Address */ ++ u32 b2h_l; /* B2H Buffer Length */ ++ u32 h2b_l; /* H2B Buffer Length 
*/ ++ u8 buffer_type; ++ u8 reserved1[7]; ++ struct buffer_type_desc bt_desc; ++ u8 reserved2[8]; ++} __packed; ++ ++struct aspeed_pcie_mmbi; ++ ++#define MCTP_MMBI_MTU 65536 ++#define MCTP_MMBI_MTU_MIN 68 /* base mtu (64) + mctp header */ ++#define MCTP_MMBI_MTU_MAX 65536 ++ ++struct aspeed_mmbi_mctp { ++ struct aspeed_pcie_mmbi *mmbi; ++ struct net_device *ndev; ++}; ++ ++struct aspeed_mmbi_channel { ++ struct aspeed_pcie_mmbi *mmbi; ++ struct device *dev; ++ ++ u32 b2h_cb_size; ++ u32 h2b_cb_size; ++ u8 __iomem *desc_vmem; ++ u8 __iomem *hros_vmem; ++ u8 __iomem *b2h_cb_vmem; ++ u8 __iomem *hrws_vmem; ++ u8 __iomem *h2b_cb_vmem; ++ ++ bool bmc_int_en; ++ u8 bmc_int_value; ++ u32 bmc_int_location; ++ u8 __iomem *bmc_int_vmem; ++ ++ bool host_int_en; ++ u8 host_int_location; ++ u8 host_int_value; ++ ++ enum mmbi_state state; ++ ++ /* MCTP */ ++ struct net_device *ndev; ++ ++ struct work_struct work; ++}; ++ ++#endif +diff --git a/drivers/soc/aspeed/aspeed-socinfo.c b/drivers/soc/aspeed/aspeed-socinfo.c +--- a/drivers/soc/aspeed/aspeed-socinfo.c 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/soc/aspeed/aspeed-socinfo.c 2025-12-23 10:16:21.125032653 +0000 +@@ -27,6 +27,10 @@ + { "AST2620", 0x05010203 }, + { "AST2605", 0x05030103 }, + { "AST2625", 0x05030403 }, ++ /* AST2700 */ ++ { "AST2750", 0x06000003 }, ++ { "AST2700", 0x06000103 }, ++ { "AST2720", 0x06000203 }, + }; + + static const char *siliconid_to_name(u32 siliconid) +diff --git a/drivers/soc/aspeed/aspeed-ssp.c b/drivers/soc/aspeed/aspeed-ssp.c +--- a/drivers/soc/aspeed/aspeed-ssp.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/soc/aspeed/aspeed-ssp.c 2025-12-23 10:16:21.125032653 +0000 +@@ -0,0 +1,275 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++// Copyright (C) ASPEED Technology Inc. 
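++//
++// SSP loader: copies the ast2600_ssp.bin firmware into a reserved-memory region,
++// programs the SCU base/limit and cache-control registers for the coprocessor,
++// releases it from reset, and services its software interrupts by logging the
++// data it places in the shared-memory area.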
++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define SSP_FILE_NAME "ast2600_ssp.bin" ++#define AST2600_CVIC_TRIGGER 0x28 ++#define AST2600_CVIC_PENDING_STATUS 0x18 ++#define AST2600_CVIC_PENDING_CLEAR 0x1C ++ ++#define SSP_CTRL_REG 0xa00 ++#define SSP_CTRL_RESET_ASSERT BIT(1) ++#define SSP_CTRL_EN BIT(0) ++ ++#define SSP_MEM_BASE_REG 0xa04 ++#define SSP_IMEM_LIMIT_REG 0xa08 ++#define SSP_DMEM_LIMIT_REG 0xa0c ++#define SSP_CACHE_RANGE_REG 0xa40 ++#define SSP_CACHE_INVALID_REG 0xa44 ++#define SSP_CACHE_CTRL_REG 0xa48 ++#define SSP_CACHE_CLEAR_ICACHE BIT(2) ++#define SSP_CACHE_CLEAR_DCACHE BIT(1) ++#define SSP_CACHE_EN BIT(0) ++ ++#define SSP_TOTAL_MEM_SZ (32 * 1024 * 1024) ++#define SSP_CACHED_MEM_SZ (16 * 1024 * 1024) ++#define SSP_UNCACHED_MEM_SZ (SSP_TOTAL_MEM_SZ - SSP_CACHED_MEM_SZ) ++#define SSP_CACHE_1ST_16MB_ENABLE BIT(0) ++ ++struct ast2600_ssp { ++ struct device *dev; ++ struct regmap *scu; ++ dma_addr_t ssp_mem_phy_addr; ++ void __iomem *ssp_mem_vir_addr; ++ dma_addr_t ssp_shared_mem_phy_addr; ++ void __iomem *ssp_shared_mem_vir_addr; ++ int ssp_shared_mem_size; ++ void __iomem *cvic; ++ int irq[16]; ++ int n_irq; ++}; ++ ++static int ast_ssp_open(struct inode *inode, struct file *file) ++{ ++ return 0; ++} ++ ++static int ast_ssp_release(struct inode *inode, struct file *file) ++{ ++ return 0; ++} ++ ++static const struct file_operations ast_ssp_fops = { ++ .owner = THIS_MODULE, ++ .open = ast_ssp_open, ++ .release = ast_ssp_release, ++ .llseek = noop_llseek, ++}; ++ ++struct miscdevice ast_ssp_misc = { ++ .minor = MISC_DYNAMIC_MINOR, ++ .name = "ast-ssp", ++ .fops = &ast_ssp_fops, ++}; ++ ++static irqreturn_t ast2600_ssp_interrupt(int irq, void *dev_id) ++{ ++ u32 i; ++ struct ast2600_ssp *priv = dev_id; ++ u32 isr = readl(priv->cvic + AST2600_CVIC_PENDING_STATUS); ++ u32 ssp_shared_rx_tx_size = priv->ssp_shared_mem_size / 2; ++ u32 *ssp_shared_mem_tx = priv->ssp_shared_mem_vir_addr; ++ u32 *ssp_shared_mem_rx = priv->ssp_shared_mem_vir_addr + ssp_shared_rx_tx_size; ++ ++ dev_info(priv->dev, "isr %x\n", isr); ++ writel(isr, priv->cvic + AST2600_CVIC_PENDING_CLEAR); ++ ++ dev_info(priv->dev, "[CA7] rx addr:%08x, tx addr:%08x\n", ++ (u32)ssp_shared_mem_rx, (u32)ssp_shared_mem_tx); ++ ++ /* Check the CA7 RX data from CM3 TX data. 
*/ ++ dev_info(priv->dev, "CA7 RX data from CM3 TX data: "); ++ for (i = 0; i < ssp_shared_rx_tx_size / 4; i++) { ++ if (readl(ssp_shared_mem_rx + i) != 0) { ++ dev_info(priv->dev, "[%08x] %08x ", ++ (u32)(ssp_shared_mem_rx + i), readl(ssp_shared_mem_rx + i)); ++ } else { ++ break; ++ } ++ } ++ ++ return IRQ_HANDLED; ++} ++ ++static int ast_ssp_probe(struct platform_device *pdev) ++{ ++ struct device_node *np, *mnode = dev_of_node(&pdev->dev); ++ const struct firmware *firmware; ++ struct ast2600_ssp *priv; ++ struct reserved_mem *rmem; ++ int i, ret; ++ ++ priv = kzalloc(sizeof(*priv), GFP_KERNEL); ++ if (!priv) { ++ ret = -ENOMEM; ++ goto finish; ++ } ++ ++ priv->dev = &pdev->dev; ++ priv->scu = syscon_regmap_lookup_by_phandle(priv->dev->of_node, "aspeed,scu"); ++ if (IS_ERR(priv->scu)) { ++ dev_err(priv->dev, "failed to find SCU regmap\n"); ++ ret = -EINVAL; ++ goto finish; ++ } ++ platform_set_drvdata(pdev, priv); ++ ++ ret = misc_register(&ast_ssp_misc); ++ if (ret) { ++ pr_err("can't misc_register :(\n"); ++ ret = -EIO; ++ goto finish; ++ } ++ dev_set_drvdata(ast_ssp_misc.this_device, pdev); ++ ++ ret = of_reserved_mem_device_init(&pdev->dev); ++ if (ret) { ++ dev_err(priv->dev, ++ "failed to initialize reserved mem: %d\n", ret); ++ ret = -ENOMEM; ++ goto finish; ++ } ++ ++ np = of_parse_phandle(priv->dev->of_node, "memory-region", 0); ++ if (!np) { ++ dev_err(priv->dev, "can't find memory-region node\n"); ++ ret = -ENOMEM; ++ goto finish; ++ } ++ ++ rmem = of_reserved_mem_lookup(np); ++ of_node_put(np); ++ if (!rmem) { ++ dev_err(priv->dev, "can't find reserved memory.\n"); ++ ret = -ENOMEM; ++ goto finish; ++ } else { ++ priv->ssp_mem_phy_addr = rmem->base; ++ priv->ssp_mem_vir_addr = devm_ioremap(priv->dev, priv->ssp_mem_phy_addr, SSP_TOTAL_MEM_SZ); ++ if (!priv->ssp_mem_vir_addr) { ++ dev_err(priv->dev, "can't create reserved memory.\n"); ++ ret = -ENOMEM; ++ goto finish; ++ } else { ++ dev_info(priv->dev, "\nSSP memory: virt(0x%08x), phys(0x%08x)\n", ++ (uint32_t)priv->ssp_mem_vir_addr, priv->ssp_mem_phy_addr); ++ } ++ } ++ ++ if (of_property_read_u32(np, "shm-size", &priv->ssp_shared_mem_size)) { ++ dev_err(priv->dev, "can't find shm-size property\n"); ++ ret = -ENOMEM; ++ goto finish; ++ } ++ ++ priv->ssp_shared_mem_vir_addr = priv->ssp_mem_vir_addr + SSP_TOTAL_MEM_SZ ++ - priv->ssp_shared_mem_size; ++ priv->ssp_shared_mem_phy_addr = priv->ssp_mem_phy_addr + SSP_TOTAL_MEM_SZ ++ - priv->ssp_shared_mem_size; ++ dev_info(priv->dev, "\nSSP shared memory: virt(0x%08x), phys(0x%08x), size(0x%08x)\n", ++ (uint32_t)priv->ssp_shared_mem_vir_addr, priv->ssp_shared_mem_phy_addr, ++ priv->ssp_shared_mem_size); ++ ++ if (request_firmware(&firmware, SSP_FILE_NAME, priv->dev) < 0) { ++ dev_err(priv->dev, "don't have %s\n", SSP_FILE_NAME); ++ release_firmware(firmware); ++ ret = -EINVAL; ++ goto finish; ++ } ++ ++ memcpy(priv->ssp_mem_vir_addr, (void *)firmware->data, firmware->size); ++ release_firmware(firmware); ++ ++ np = of_parse_phandle(mnode, "aspeed,cvic", 0); ++ if (!np) { ++ dev_err(priv->dev, "can't find CVIC\n"); ++ ret = -EINVAL; ++ goto finish; ++ } ++ ++ priv->cvic = devm_of_iomap(priv->dev, np, 0, NULL); ++ if (IS_ERR(priv->cvic)) { ++ dev_err(priv->dev, "can't map CVIC\n"); ++ ret = -EINVAL; ++ goto finish; ++ } ++ ++ i = 0; ++ while (0 != (priv->irq[i] = irq_of_parse_and_map(mnode, i))) { ++ ret = request_irq(priv->irq[i], ast2600_ssp_interrupt, 0, ++ "ssp-sw-irq", priv); ++ i++; ++ } ++ priv->n_irq = i; ++ dev_info(priv->dev, "%d ISRs registered\n", priv->n_irq); ++ ++ 
regmap_write(priv->scu, SSP_CTRL_REG, 0); ++ mdelay(1); ++ regmap_write(priv->scu, SSP_MEM_BASE_REG, priv->ssp_mem_phy_addr); ++ regmap_write(priv->scu, SSP_IMEM_LIMIT_REG, priv->ssp_mem_phy_addr + SSP_CACHED_MEM_SZ); ++ regmap_write(priv->scu, SSP_DMEM_LIMIT_REG, priv->ssp_mem_phy_addr + SSP_TOTAL_MEM_SZ); ++ ++ regmap_write(priv->scu, SSP_CACHE_RANGE_REG, SSP_CACHE_1ST_16MB_ENABLE); ++ ++ regmap_write(priv->scu, SSP_CTRL_REG, SSP_CTRL_RESET_ASSERT); ++ mdelay(1); ++ regmap_write(priv->scu, SSP_CTRL_REG, 0); ++ mdelay(1); ++ regmap_write(priv->scu, SSP_CTRL_REG, SSP_CTRL_EN); ++ ++ dev_info(priv->dev, "Init successful\n"); ++ ret = 0; ++finish: ++ return ret; ++} ++ ++static void ast_ssp_remove(struct platform_device *pdev) ++{ ++ struct ast2600_ssp *priv = platform_get_drvdata(pdev); ++ int i; ++ ++ dev_info(priv->dev, "SSP module removed\n"); ++ regmap_write(priv->scu, SSP_CTRL_REG, 0); ++ for (i = 0; i < priv->n_irq; i++) ++ free_irq(priv->irq[i], priv); ++ ++ kfree(priv); ++ ++ misc_deregister((struct miscdevice *)&ast_ssp_misc); ++} ++ ++static const struct of_device_id of_ast_ssp_match_table[] = { ++ { .compatible = "aspeed,ast2600-ssp", }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(of, of_ast_ssp_match_table); ++ ++static struct platform_driver ast_ssp_driver = { ++ .probe = ast_ssp_probe, ++ .remove = ast_ssp_remove, ++ .driver = { ++ .name = KBUILD_MODNAME, ++ .of_match_table = of_ast_ssp_match_table, ++ }, ++}; ++ ++module_platform_driver(ast_ssp_driver); ++ ++MODULE_LICENSE("Dual BSD/GPL"); +diff --git a/drivers/soc/aspeed/aspeed-uart-routing.c b/drivers/soc/aspeed/aspeed-uart-routing.c +--- a/drivers/soc/aspeed/aspeed-uart-routing.c 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/soc/aspeed/aspeed-uart-routing.c 2025-12-23 10:16:21.125032653 +0000 +@@ -15,20 +15,30 @@ + #define HICRA 0x9c + + /* attributes options */ ++#define UART_ROUTING_IO0 "io0" + #define UART_ROUTING_IO1 "io1" + #define UART_ROUTING_IO2 "io2" + #define UART_ROUTING_IO3 "io3" + #define UART_ROUTING_IO4 "io4" + #define UART_ROUTING_IO5 "io5" + #define UART_ROUTING_IO6 "io6" ++#define UART_ROUTING_IO7 "io7" ++#define UART_ROUTING_IO8 "io8" ++#define UART_ROUTING_IO9 "io9" + #define UART_ROUTING_IO10 "io10" ++#define UART_ROUTING_IO12 "io12" ++#define UART_ROUTING_UART0 "uart0" + #define UART_ROUTING_UART1 "uart1" + #define UART_ROUTING_UART2 "uart2" + #define UART_ROUTING_UART3 "uart3" + #define UART_ROUTING_UART4 "uart4" + #define UART_ROUTING_UART5 "uart5" + #define UART_ROUTING_UART6 "uart6" ++#define UART_ROUTING_UART7 "uart7" ++#define UART_ROUTING_UART8 "uart8" ++#define UART_ROUTING_UART9 "uart9" + #define UART_ROUTING_UART10 "uart10" ++#define UART_ROUTING_UART12 "uart12" + #define UART_ROUTING_RES "reserved" + + struct aspeed_uart_routing { +@@ -488,6 +498,416 @@ + .attrs = ast2600_uart_routing_attrs, + }; + ++/* routing selector for AST27xx node 0 */ ++static struct aspeed_uart_routing_selector ast2700n0_uart9_sel = { ++ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART9), ++ .reg = HICR9, ++ .shift = 12, ++ .mask = 0xf, ++ .options = { ++ UART_ROUTING_IO9, ++ UART_ROUTING_IO0, ++ UART_ROUTING_IO1, ++ UART_ROUTING_IO2, ++ UART_ROUTING_IO3, ++ UART_ROUTING_RES, ++ UART_ROUTING_UART0, ++ UART_ROUTING_UART1, ++ UART_ROUTING_UART2, ++ UART_ROUTING_UART3, ++ UART_ROUTING_UART12, ++ NULL, ++ }, ++}; ++ ++static struct aspeed_uart_routing_selector ast2700n0_io9_sel = { ++ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO9), ++ .reg = HICR9, ++ .shift = 8, ++ .mask = 0xf, ++ .options = { ++ UART_ROUTING_UART0, ++ 
UART_ROUTING_UART1, ++ UART_ROUTING_UART2, ++ UART_ROUTING_UART3, ++ UART_ROUTING_UART12, ++ UART_ROUTING_IO0, ++ UART_ROUTING_IO1, ++ UART_ROUTING_IO2, ++ UART_ROUTING_IO3, ++ UART_ROUTING_RES, ++ UART_ROUTING_UART9, ++ NULL, ++ }, ++}; ++ ++static struct aspeed_uart_routing_selector ast2700n0_uart3_sel = { ++ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART3), ++ .reg = HICRA, ++ .shift = 25, ++ .mask = 0x7, ++ .options = { ++ UART_ROUTING_IO3, ++ UART_ROUTING_IO0, ++ UART_ROUTING_IO1, ++ UART_ROUTING_IO2, ++ UART_ROUTING_UART0, ++ UART_ROUTING_UART1, ++ UART_ROUTING_UART2, ++ UART_ROUTING_IO9, ++ NULL, ++ }, ++}; ++ ++static struct aspeed_uart_routing_selector ast2700n0_uart2_sel = { ++ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART2), ++ .reg = HICRA, ++ .shift = 22, ++ .mask = 0x7, ++ .options = { ++ UART_ROUTING_IO2, ++ UART_ROUTING_IO3, ++ UART_ROUTING_IO0, ++ UART_ROUTING_IO1, ++ UART_ROUTING_UART3, ++ UART_ROUTING_UART0, ++ UART_ROUTING_UART1, ++ UART_ROUTING_IO9, ++ NULL, ++ }, ++}; ++ ++static struct aspeed_uart_routing_selector ast2700n0_uart1_sel = { ++ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART1), ++ .reg = HICRA, ++ .shift = 19, ++ .mask = 0x7, ++ .options = { ++ UART_ROUTING_IO1, ++ UART_ROUTING_IO2, ++ UART_ROUTING_IO3, ++ UART_ROUTING_IO0, ++ UART_ROUTING_UART2, ++ UART_ROUTING_UART3, ++ UART_ROUTING_UART0, ++ UART_ROUTING_IO9, ++ NULL, ++ }, ++}; ++ ++static struct aspeed_uart_routing_selector ast2700n0_uart0_sel = { ++ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART0), ++ .reg = HICRA, ++ .shift = 16, ++ .mask = 0x7, ++ .options = { ++ UART_ROUTING_IO0, ++ UART_ROUTING_IO1, ++ UART_ROUTING_IO2, ++ UART_ROUTING_IO3, ++ UART_ROUTING_UART1, ++ UART_ROUTING_UART2, ++ UART_ROUTING_UART3, ++ UART_ROUTING_IO9, ++ NULL, ++ }, ++}; ++ ++static struct aspeed_uart_routing_selector ast2700n0_io3_sel = { ++ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO3), ++ .reg = HICRA, ++ .shift = 9, ++ .mask = 0x7, ++ .options = { ++ UART_ROUTING_UART3, ++ UART_ROUTING_UART9, ++ UART_ROUTING_UART0, ++ UART_ROUTING_UART1, ++ UART_ROUTING_UART2, ++ UART_ROUTING_IO0, ++ UART_ROUTING_IO1, ++ UART_ROUTING_IO9, ++ NULL, ++ }, ++}; ++ ++static struct aspeed_uart_routing_selector ast2700n0_io2_sel = { ++ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO2), ++ .reg = HICRA, ++ .shift = 6, ++ .mask = 0x7, ++ .options = { ++ UART_ROUTING_UART2, ++ UART_ROUTING_UART3, ++ UART_ROUTING_UART9, ++ UART_ROUTING_UART0, ++ UART_ROUTING_UART1, ++ UART_ROUTING_IO0, ++ UART_ROUTING_IO1, ++ UART_ROUTING_IO9, ++ NULL, ++ }, ++}; ++ ++static struct aspeed_uart_routing_selector ast2700n0_io1_sel = { ++ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO1), ++ .reg = HICRA, ++ .shift = 3, ++ .mask = 0x7, ++ .options = { ++ UART_ROUTING_UART1, ++ UART_ROUTING_UART2, ++ UART_ROUTING_UART3, ++ UART_ROUTING_UART9, ++ UART_ROUTING_UART0, ++ UART_ROUTING_IO2, ++ UART_ROUTING_IO3, ++ UART_ROUTING_IO9, ++ NULL, ++ }, ++}; ++ ++static struct aspeed_uart_routing_selector ast2700n0_io0_sel = { ++ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO0), ++ .reg = HICRA, ++ .shift = 0, ++ .mask = 0x7, ++ .options = { ++ UART_ROUTING_UART0, ++ UART_ROUTING_UART1, ++ UART_ROUTING_UART2, ++ UART_ROUTING_UART3, ++ UART_ROUTING_UART9, ++ UART_ROUTING_IO2, ++ UART_ROUTING_IO3, ++ UART_ROUTING_IO9, ++ NULL, ++ }, ++}; ++ ++static struct attribute *ast2700n0_uart_routing_attrs[] = { ++ &ast2700n0_uart9_sel.dev_attr.attr, ++ &ast2700n0_io9_sel.dev_attr.attr, ++ &ast2700n0_uart3_sel.dev_attr.attr, ++ &ast2700n0_uart2_sel.dev_attr.attr, ++ &ast2700n0_uart1_sel.dev_attr.attr, ++ 
&ast2700n0_uart0_sel.dev_attr.attr, ++ &ast2700n0_io3_sel.dev_attr.attr, ++ &ast2700n0_io2_sel.dev_attr.attr, ++ &ast2700n0_io1_sel.dev_attr.attr, ++ &ast2700n0_io0_sel.dev_attr.attr, ++ NULL, ++}; ++ ++static const struct attribute_group ast2700n0_uart_routing_attr_group = { ++ .attrs = ast2700n0_uart_routing_attrs, ++}; ++ ++/* routing selector for AST27xx node 1 */ ++static struct aspeed_uart_routing_selector ast2700n1_uart10_sel = { ++ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART10), ++ .reg = HICR9, ++ .shift = 12, ++ .mask = 0xf, ++ .options = { ++ UART_ROUTING_IO10, ++ UART_ROUTING_IO5, ++ UART_ROUTING_IO6, ++ UART_ROUTING_IO7, ++ UART_ROUTING_IO8, ++ UART_ROUTING_RES, ++ UART_ROUTING_UART5, ++ UART_ROUTING_UART6, ++ UART_ROUTING_UART7, ++ UART_ROUTING_UART8, ++ UART_ROUTING_UART12, ++ NULL, ++ }, ++}; ++ ++static struct aspeed_uart_routing_selector ast2700n1_io10_sel = { ++ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO10), ++ .reg = HICR9, ++ .shift = 8, ++ .mask = 0xf, ++ .options = { ++ UART_ROUTING_UART5, ++ UART_ROUTING_UART6, ++ UART_ROUTING_UART7, ++ UART_ROUTING_UART8, ++ UART_ROUTING_UART12, ++ UART_ROUTING_IO5, ++ UART_ROUTING_IO6, ++ UART_ROUTING_IO7, ++ UART_ROUTING_IO8, ++ UART_ROUTING_RES, ++ UART_ROUTING_UART10, ++ NULL, ++ }, ++}; ++ ++static struct aspeed_uart_routing_selector ast2700n1_uart8_sel = { ++ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART8), ++ .reg = HICRA, ++ .shift = 25, ++ .mask = 0x7, ++ .options = { ++ UART_ROUTING_IO8, ++ UART_ROUTING_IO5, ++ UART_ROUTING_IO6, ++ UART_ROUTING_IO7, ++ UART_ROUTING_UART5, ++ UART_ROUTING_UART6, ++ UART_ROUTING_UART7, ++ UART_ROUTING_IO10, ++ NULL, ++ }, ++}; ++ ++static struct aspeed_uart_routing_selector ast2700n1_uart7_sel = { ++ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART7), ++ .reg = HICRA, ++ .shift = 22, ++ .mask = 0x7, ++ .options = { ++ UART_ROUTING_IO7, ++ UART_ROUTING_IO8, ++ UART_ROUTING_IO5, ++ UART_ROUTING_IO6, ++ UART_ROUTING_UART8, ++ UART_ROUTING_UART5, ++ UART_ROUTING_UART6, ++ UART_ROUTING_IO10, ++ NULL, ++ }, ++}; ++ ++static struct aspeed_uart_routing_selector ast2700n1_uart6_sel = { ++ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART6), ++ .reg = HICRA, ++ .shift = 19, ++ .mask = 0x7, ++ .options = { ++ UART_ROUTING_IO6, ++ UART_ROUTING_IO7, ++ UART_ROUTING_IO8, ++ UART_ROUTING_IO5, ++ UART_ROUTING_UART7, ++ UART_ROUTING_UART8, ++ UART_ROUTING_UART5, ++ UART_ROUTING_IO10, ++ NULL, ++ }, ++}; ++ ++static struct aspeed_uart_routing_selector ast2700n1_uart5_sel = { ++ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART5), ++ .reg = HICRA, ++ .shift = 16, ++ .mask = 0x7, ++ .options = { ++ UART_ROUTING_IO5, ++ UART_ROUTING_IO6, ++ UART_ROUTING_IO7, ++ UART_ROUTING_IO8, ++ UART_ROUTING_UART6, ++ UART_ROUTING_UART7, ++ UART_ROUTING_UART8, ++ UART_ROUTING_IO10, ++ NULL, ++ }, ++}; ++ ++static struct aspeed_uart_routing_selector ast2700n1_io8_sel = { ++ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO8), ++ .reg = HICRA, ++ .shift = 9, ++ .mask = 0x7, ++ .options = { ++ UART_ROUTING_UART8, ++ UART_ROUTING_UART10, ++ UART_ROUTING_UART5, ++ UART_ROUTING_UART6, ++ UART_ROUTING_UART7, ++ UART_ROUTING_IO5, ++ UART_ROUTING_IO6, ++ UART_ROUTING_IO10, ++ NULL, ++ }, ++}; ++ ++static struct aspeed_uart_routing_selector ast2700n1_io7_sel = { ++ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO7), ++ .reg = HICRA, ++ .shift = 6, ++ .mask = 0x7, ++ .options = { ++ UART_ROUTING_UART7, ++ UART_ROUTING_UART8, ++ UART_ROUTING_UART10, ++ UART_ROUTING_UART5, ++ UART_ROUTING_UART6, ++ UART_ROUTING_IO5, ++ UART_ROUTING_IO6, ++ UART_ROUTING_IO10, ++ NULL, ++ }, ++}; 
++ ++static struct aspeed_uart_routing_selector ast2700n1_io6_sel = { ++ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO6), ++ .reg = HICRA, ++ .shift = 3, ++ .mask = 0x7, ++ .options = { ++ UART_ROUTING_UART6, ++ UART_ROUTING_UART7, ++ UART_ROUTING_UART8, ++ UART_ROUTING_UART10, ++ UART_ROUTING_UART5, ++ UART_ROUTING_IO7, ++ UART_ROUTING_IO8, ++ UART_ROUTING_IO10, ++ NULL, ++ }, ++}; ++ ++static struct aspeed_uart_routing_selector ast2700n1_io5_sel = { ++ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO5), ++ .reg = HICRA, ++ .shift = 0, ++ .mask = 0x7, ++ .options = { ++ UART_ROUTING_UART5, ++ UART_ROUTING_UART6, ++ UART_ROUTING_UART7, ++ UART_ROUTING_UART8, ++ UART_ROUTING_UART10, ++ UART_ROUTING_IO7, ++ UART_ROUTING_IO8, ++ UART_ROUTING_IO10, ++ NULL, ++ }, ++}; ++ ++static struct attribute *ast2700n1_uart_routing_attrs[] = { ++ &ast2700n1_uart10_sel.dev_attr.attr, ++ &ast2700n1_io10_sel.dev_attr.attr, ++ &ast2700n1_uart8_sel.dev_attr.attr, ++ &ast2700n1_uart7_sel.dev_attr.attr, ++ &ast2700n1_uart6_sel.dev_attr.attr, ++ &ast2700n1_uart5_sel.dev_attr.attr, ++ &ast2700n1_io8_sel.dev_attr.attr, ++ &ast2700n1_io7_sel.dev_attr.attr, ++ &ast2700n1_io6_sel.dev_attr.attr, ++ &ast2700n1_io5_sel.dev_attr.attr, ++ NULL, ++}; ++ ++static const struct attribute_group ast2700n1_uart_routing_attr_group = { ++ .attrs = ast2700n1_uart_routing_attrs, ++}; ++ + static ssize_t aspeed_uart_routing_show(struct device *dev, + struct device_attribute *attr, + char *buf) +@@ -580,6 +1000,10 @@ + .data = &ast2500_uart_routing_attr_group }, + { .compatible = "aspeed,ast2600-uart-routing", + .data = &ast2600_uart_routing_attr_group }, ++ { .compatible = "aspeed,ast2700n0-uart-routing", ++ .data = &ast2700n0_uart_routing_attr_group }, ++ { .compatible = "aspeed,ast2700n1-uart-routing", ++ .data = &ast2700n1_uart_routing_attr_group }, + { }, + }; + +@@ -589,7 +1013,7 @@ + .of_match_table = aspeed_uart_routing_table, + }, + .probe = aspeed_uart_routing_probe, +- .remove_new = aspeed_uart_routing_remove, ++ .remove = aspeed_uart_routing_remove, + }; + + module_platform_driver(aspeed_uart_routing_driver); +diff --git a/drivers/soc/aspeed/aspeed-udma.c b/drivers/soc/aspeed/aspeed-udma.c +--- a/drivers/soc/aspeed/aspeed-udma.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/soc/aspeed/aspeed-udma.c 2025-12-23 10:16:21.125032653 +0000 +@@ -0,0 +1,434 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++/* ++ * Copyright 2020 Aspeed Technology Inc. 
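++ *
++ * UART DMA (UDMA) helpers: this file owns the shared UDMA register block and
++ * exports aspeed_udma_request_tx_chan()/aspeed_udma_request_rx_chan() plus the
++ * pointer and channel-control helpers that serial drivers can use to run
++ * kfifo-backed TX/RX buffers through DMA, with completion callbacks invoked
++ * from the UDMA interrupt handler.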
++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define DEVICE_NAME "aspeed-udma" ++ ++/* UART DMA registers offset */ ++#define UDMA_TX_DMA_EN 0x000 ++#define UDMA_RX_DMA_EN 0x004 ++#define UDMA_MISC 0x008 ++#define UDMA_MISC_RX_BUFSZ GENMASK(3, 2) ++#define UDMA_MISC_TX_BUFSZ GENMASK(1, 0) ++#define UDMA_TMOUT_TIMER 0x00c ++#define UDMA_TX_DMA_RST 0x020 ++#define UDMA_RX_DMA_RST 0x024 ++#define UDMA_TX_DMA_INT_EN 0x030 ++#define UDMA_TX_DMA_INT_STS 0x034 ++#define UDMA_RX_DMA_INT_EN 0x038 ++#define UDMA_RX_DMA_INT_STS 0x03c ++ ++#define UDMA_CHX_OFF(x) ((x) * 0x20) ++#define UDMA_CHX_TX_RD_PTR(x) (0x040 + UDMA_CHX_OFF(x)) ++#define UDMA_CHX_TX_WR_PTR(x) (0x044 + UDMA_CHX_OFF(x)) ++#define UDMA_CHX_TX_BUF_ADDR(x) (0x048 + UDMA_CHX_OFF(x)) ++#define UDMA_CHX_TX_CTRL(x) (0x04c + UDMA_CHX_OFF(x)) ++#define UDMA_TX_CTRL_BUF_ADDRH GENMASK(10, 8) ++#define UDMA_TX_CTRL_TMOUT_DIS BIT(4) ++#define UDMA_TX_CTRL_BUFSZ GENMASK(3, 0) ++#define UDMA_CHX_RX_RD_PTR(x) (0x050 + UDMA_CHX_OFF(x)) ++#define UDMA_CHX_RX_WR_PTR(x) (0x054 + UDMA_CHX_OFF(x)) ++#define UDMA_CHX_RX_BUF_ADDR(x) (0x058 + UDMA_CHX_OFF(x)) ++#define UDMA_CHX_RX_CTRL(x) (0x05c + UDMA_CHX_OFF(x)) ++#define UDMA_RX_CTRL_BUF_ADDRH GENMASK(10, 8) ++#define UDMA_RX_CTRL_TMOUT_DIS BIT(4) ++#define UDMA_RX_CTRL_BUFSZ GENMASK(1, 0) ++ ++#define UDMA_MAX_CHANNEL 16 ++#define UDMA_TMOUT 0x200 ++ ++enum aspeed_udma_bufsz_code { ++ UDMA_BUFSZ_CODE_1KB, ++ UDMA_BUFSZ_CODE_4KB, ++ UDMA_BUFSZ_CODE_16KB, ++ UDMA_BUFSZ_CODE_64KB, ++}; ++ ++struct aspeed_udma_chan { ++ dma_addr_t dma_addr; ++ ++ struct kfifo *fifo; ++ u32 fifo_sz; ++ ++ aspeed_udma_cb_t cb; ++ void *cb_arg; ++ ++ bool dis_tmout; ++}; ++ ++struct aspeed_udma { ++ struct device *dev; ++ u8 __iomem *regs; ++ int irq; ++ struct aspeed_udma_chan tx_chs[UDMA_MAX_CHANNEL]; ++ struct aspeed_udma_chan rx_chs[UDMA_MAX_CHANNEL]; ++ spinlock_t lock; ++}; ++ ++struct aspeed_udma udma[1]; ++ ++static int aspeed_udma_get_bufsz_code(u32 buf_sz) ++{ ++ switch (buf_sz) { ++ case SZ_1K: ++ return UDMA_BUFSZ_CODE_1KB; ++ case SZ_4K: ++ return UDMA_BUFSZ_CODE_4KB; ++ case SZ_16K: ++ return UDMA_BUFSZ_CODE_16KB; ++ case SZ_64K: ++ return UDMA_BUFSZ_CODE_64KB; ++ default: ++ break; ++ } ++ ++ return -1; ++} ++ ++static u32 aspeed_udma_get_tx_rptr(u32 ch_no) ++{ ++ return readl(udma->regs + UDMA_CHX_TX_RD_PTR(ch_no)); ++} ++ ++static u32 aspeed_udma_get_rx_wptr(u32 ch_no) ++{ ++ return readl(udma->regs + UDMA_CHX_RX_WR_PTR(ch_no)); ++} ++ ++static void aspeed_udma_set_ptr(u32 ch_no, u32 ptr, bool is_tx) ++{ ++ writel(ptr, udma->regs + ++ ((is_tx) ? UDMA_CHX_TX_WR_PTR(ch_no) : UDMA_CHX_RX_RD_PTR(ch_no))); ++} ++ ++void aspeed_udma_set_tx_wptr(u32 ch_no, u32 wptr) ++{ ++ aspeed_udma_set_ptr(ch_no, wptr, true); ++} ++EXPORT_SYMBOL(aspeed_udma_set_tx_wptr); ++ ++void aspeed_udma_set_rx_rptr(u32 ch_no, u32 rptr) ++{ ++ aspeed_udma_set_ptr(ch_no, rptr, false); ++} ++EXPORT_SYMBOL(aspeed_udma_set_rx_rptr); ++ ++static int aspeed_udma_free_chan(u32 ch_no, bool is_tx) ++{ ++ u32 reg; ++ unsigned long flags; ++ ++ if (ch_no > UDMA_MAX_CHANNEL) ++ return -EINVAL; ++ ++ spin_lock_irqsave(&udma->lock, flags); ++ ++ reg = readl(udma->regs + ++ ((is_tx) ? UDMA_TX_DMA_INT_EN : UDMA_RX_DMA_INT_EN)); ++ reg &= ~(0x1 << ch_no); ++ ++ writel(reg, udma->regs + ++ ((is_tx) ? 
UDMA_TX_DMA_INT_EN : UDMA_RX_DMA_INT_EN)); ++ ++ spin_unlock_irqrestore(&udma->lock, flags); ++ ++ return 0; ++} ++ ++int aspeed_udma_free_tx_chan(u32 ch_no) ++{ ++ return aspeed_udma_free_chan(ch_no, true); ++} ++EXPORT_SYMBOL(aspeed_udma_free_tx_chan); ++ ++int aspeed_udma_free_rx_chan(u32 ch_no) ++{ ++ return aspeed_udma_free_chan(ch_no, false); ++} ++EXPORT_SYMBOL(aspeed_udma_free_rx_chan); ++ ++static int aspeed_udma_request_chan(u32 ch_no, dma_addr_t addr, ++ struct kfifo *fifo, u32 fifo_sz, ++ aspeed_udma_cb_t cb, void *id, bool dis_tmout, bool is_tx) ++{ ++ int retval = 0; ++ int fifosz_code; ++ ++ u32 reg; ++ unsigned long flags; ++ struct aspeed_udma_chan *ch; ++ ++ if (ch_no > UDMA_MAX_CHANNEL) { ++ retval = -EINVAL; ++ goto out; ++ } ++ ++ if (IS_ERR_OR_NULL(fifo) || IS_ERR_OR_NULL(fifo->kfifo.data)) { ++ retval = -EINVAL; ++ goto out; ++ } ++ ++ fifosz_code = aspeed_udma_get_bufsz_code(fifo_sz); ++ if (fifosz_code < 0) { ++ retval = -EINVAL; ++ goto out; ++ } ++ ++ spin_lock_irqsave(&udma->lock, flags); ++ ++ if (is_tx) { ++ reg = readl(udma->regs + UDMA_TX_DMA_INT_EN); ++ if (reg & (0x1 << ch_no)) { ++ retval = -EBUSY; ++ goto unlock_n_out; ++ } ++ ++ reg |= (0x1 << ch_no); ++ writel(reg, udma->regs + UDMA_TX_DMA_INT_EN); ++ ++ reg = FIELD_PREP(UDMA_TX_CTRL_BUF_ADDRH, (u64)addr >> 32) | ++ ((dis_tmout) ? UDMA_TX_CTRL_TMOUT_DIS : 0) | ++ FIELD_PREP(UDMA_TX_CTRL_BUFSZ, fifosz_code); ++ writel(reg, udma->regs + UDMA_CHX_TX_CTRL(ch_no)); ++ ++ writel(addr, udma->regs + UDMA_CHX_TX_BUF_ADDR(ch_no)); ++ } else { ++ reg = readl(udma->regs + UDMA_RX_DMA_INT_EN); ++ if (reg & (0x1 << ch_no)) { ++ retval = -EBUSY; ++ goto unlock_n_out; ++ } ++ ++ reg |= (0x1 << ch_no); ++ writel(reg, udma->regs + UDMA_RX_DMA_INT_EN); ++ ++ reg = FIELD_PREP(UDMA_RX_CTRL_BUF_ADDRH, (u64)addr >> 32) | ++ ((dis_tmout) ? UDMA_RX_CTRL_TMOUT_DIS : 0) | ++ FIELD_PREP(UDMA_RX_CTRL_BUFSZ, fifosz_code); ++ writel(reg, udma->regs + UDMA_CHX_RX_CTRL(ch_no)); ++ ++ writel(addr, udma->regs + UDMA_CHX_RX_BUF_ADDR(ch_no)); ++ } ++ ++ ch = (is_tx) ? &udma->tx_chs[ch_no] : &udma->rx_chs[ch_no]; ++ ch->fifo = fifo; ++ ch->fifo_sz = fifo_sz; ++ ch->cb = cb; ++ ch->cb_arg = id; ++ ch->dma_addr = addr; ++ ch->dis_tmout = dis_tmout; ++ ++unlock_n_out: ++ spin_unlock_irqrestore(&udma->lock, flags); ++out: ++ return 0; ++} ++ ++int aspeed_udma_request_tx_chan(u32 ch_no, dma_addr_t addr, ++ struct kfifo *fifo, u32 fifo_sz, ++ aspeed_udma_cb_t cb, void *id, bool dis_tmout) ++{ ++ return aspeed_udma_request_chan(ch_no, addr, fifo, fifo_sz, cb, id, ++ dis_tmout, true); ++} ++EXPORT_SYMBOL(aspeed_udma_request_tx_chan); ++ ++int aspeed_udma_request_rx_chan(u32 ch_no, dma_addr_t addr, ++ struct kfifo *fifo, u32 fifo_sz, ++ aspeed_udma_cb_t cb, void *id, bool dis_tmout) ++{ ++ return aspeed_udma_request_chan(ch_no, addr, fifo, fifo_sz, cb, id, ++ dis_tmout, false); ++} ++EXPORT_SYMBOL(aspeed_udma_request_rx_chan); ++ ++static void aspeed_udma_chan_ctrl(u32 ch_no, u32 op, bool is_tx) ++{ ++ unsigned long flags; ++ u32 reg_en, reg_rst; ++ u32 reg_en_off = (is_tx) ? UDMA_TX_DMA_EN : UDMA_RX_DMA_EN; ++ u32 reg_rst_off = (is_tx) ? 
UDMA_TX_DMA_RST : UDMA_RX_DMA_RST;
++
++ if (ch_no > UDMA_MAX_CHANNEL)
++ return;
++
++ spin_lock_irqsave(&udma->lock, flags);
++
++ reg_en = readl(udma->regs + reg_en_off);
++ reg_rst = readl(udma->regs + reg_rst_off);
++
++ switch (op) {
++ case ASPEED_UDMA_OP_ENABLE:
++ reg_en |= (0x1 << ch_no);
++ writel(reg_en, udma->regs + reg_en_off);
++ break;
++ case ASPEED_UDMA_OP_DISABLE:
++ reg_en &= ~(0x1 << ch_no);
++ writel(reg_en, udma->regs + reg_en_off);
++ break;
++ case ASPEED_UDMA_OP_RESET:
++ reg_en &= ~(0x1 << ch_no);
++ writel(reg_en, udma->regs + reg_en_off);
++
++ reg_rst |= (0x1 << ch_no);
++ writel(reg_rst, udma->regs + reg_rst_off);
++
++ udelay(100);
++
++ reg_rst &= ~(0x1 << ch_no);
++ writel(reg_rst, udma->regs + reg_rst_off);
++ break;
++ default:
++ break;
++ }
++
++ spin_unlock_irqrestore(&udma->lock, flags);
++}
++
++void aspeed_udma_tx_chan_ctrl(u32 ch_no, enum aspeed_udma_ops op)
++{
++ aspeed_udma_chan_ctrl(ch_no, op, true);
++}
++EXPORT_SYMBOL(aspeed_udma_tx_chan_ctrl);
++
++void aspeed_udma_rx_chan_ctrl(u32 ch_no, enum aspeed_udma_ops op)
++{
++ aspeed_udma_chan_ctrl(ch_no, op, false);
++}
++EXPORT_SYMBOL(aspeed_udma_rx_chan_ctrl);
++
++static irqreturn_t aspeed_udma_isr(int irq, void *arg)
++{
++ u32 bit;
++ unsigned long tx_sts = readl(udma->regs + UDMA_TX_DMA_INT_STS);
++ unsigned long rx_sts = readl(udma->regs + UDMA_RX_DMA_INT_STS);
++
++ if (udma != (struct aspeed_udma *)arg)
++ return IRQ_NONE;
++
++ if (tx_sts == 0 && rx_sts == 0)
++ return IRQ_NONE;
++
++ for_each_set_bit(bit, &tx_sts, UDMA_MAX_CHANNEL) {
++ writel((0x1 << bit), udma->regs + UDMA_TX_DMA_INT_STS);
++ if (udma->tx_chs[bit].cb)
++ udma->tx_chs[bit].cb(aspeed_udma_get_tx_rptr(bit),
++ udma->tx_chs[bit].cb_arg);
++ }
++
++ for_each_set_bit(bit, &rx_sts, UDMA_MAX_CHANNEL) {
++ writel((0x1 << bit), udma->regs + UDMA_RX_DMA_INT_STS);
++ if (udma->rx_chs[bit].cb)
++ udma->rx_chs[bit].cb(aspeed_udma_get_rx_wptr(bit),
++ udma->rx_chs[bit].cb_arg);
++ }
++
++ return IRQ_HANDLED;
++}
++
++static int aspeed_udma_probe(struct platform_device *pdev)
++{
++ int i, rc;
++ uint32_t reg;
++ struct resource *res;
++ struct device *dev = &pdev->dev;
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (IS_ERR_OR_NULL(res)) {
++ dev_err(dev, "failed to get register base\n");
++ return -ENODEV;
++ }
++
++ udma->regs = devm_ioremap_resource(dev, res);
++ if (IS_ERR_OR_NULL(udma->regs)) {
++ dev_err(dev, "failed to map registers\n");
++ return PTR_ERR(udma->regs);
++ }
++
++ /* disable for safety */
++ writel(0x0, udma->regs + UDMA_TX_DMA_EN);
++ writel(0x0, udma->regs + UDMA_RX_DMA_EN);
++
++ udma->irq = platform_get_irq(pdev, 0);
++ if (udma->irq < 0) {
++ dev_err(dev, "failed to get IRQ number\n");
++ return -ENODEV;
++ }
++
++ rc = devm_request_irq(dev, udma->irq, aspeed_udma_isr,
++ IRQF_SHARED, DEVICE_NAME, udma);
++ if (rc) {
++ dev_err(dev, "failed to request IRQ handler\n");
++ return rc;
++ }
++
++ /*
++ * For legacy design.
++ * - TX ringbuffer size: 4KB ++ * - RX ringbuffer size: 64KB ++ * - Timeout timer disabled ++ */ ++ reg = FIELD_PREP(UDMA_MISC_TX_BUFSZ, UDMA_BUFSZ_CODE_4KB) | ++ FIELD_PREP(UDMA_MISC_RX_BUFSZ, UDMA_BUFSZ_CODE_64KB); ++ writel(reg, udma->regs + UDMA_MISC); ++ ++ for (i = 0; i < UDMA_MAX_CHANNEL; ++i) { ++ writel(0, udma->regs + UDMA_CHX_TX_WR_PTR(i)); ++ writel(0, udma->regs + UDMA_CHX_RX_RD_PTR(i)); ++ } ++ ++ writel(0xffffffff, udma->regs + UDMA_TX_DMA_RST); ++ writel(0x0, udma->regs + UDMA_TX_DMA_RST); ++ ++ writel(0xffffffff, udma->regs + UDMA_RX_DMA_RST); ++ writel(0x0, udma->regs + UDMA_RX_DMA_RST); ++ ++ writel(0x0, udma->regs + UDMA_TX_DMA_INT_EN); ++ writel(0xffffffff, udma->regs + UDMA_TX_DMA_INT_STS); ++ writel(0x0, udma->regs + UDMA_RX_DMA_INT_EN); ++ writel(0xffffffff, udma->regs + UDMA_RX_DMA_INT_STS); ++ ++ writel(UDMA_TMOUT, udma->regs + UDMA_TMOUT_TIMER); ++ ++ spin_lock_init(&udma->lock); ++ ++ dev_set_drvdata(dev, udma); ++ ++ return 0; ++} ++ ++static const struct of_device_id aspeed_udma_match[] = { ++ { .compatible = "aspeed,ast2500-udma" }, ++ { .compatible = "aspeed,ast2600-udma" }, ++ { .compatible = "aspeed,ast2700-udma" }, ++ { }, ++}; ++ ++static struct platform_driver aspeed_udma_driver = { ++ .driver = { ++ .name = DEVICE_NAME, ++ .of_match_table = aspeed_udma_match, ++ ++ }, ++ .probe = aspeed_udma_probe, ++}; ++ ++module_platform_driver(aspeed_udma_driver); ++ ++MODULE_AUTHOR("Chia-Wei Wang "); ++MODULE_LICENSE("GPL"); ++MODULE_DESCRIPTION("Aspeed UDMA Engine Driver"); +diff --git a/drivers/soc/aspeed/aspeed-usb-hp.c b/drivers/soc/aspeed/aspeed-usb-hp.c +--- a/drivers/soc/aspeed/aspeed-usb-hp.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/soc/aspeed/aspeed-usb-hp.c 2025-12-23 10:16:21.125032653 +0000 +@@ -0,0 +1,152 @@ ++// SPDX-License-Identifier: GPL-2.0+ ++/* ++ * Copyright 2021 Aspeed Technology Inc. 
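++ *
++ * USB host "HP" setup driver: enables the controller clock and reset,
++ * initializes the USB3 PHY where present, and sets the SCU bits that
++ * turn on the PCIe EHCI/xHCI host controllers.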
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define USB_HP_BEHCI84 0x84 /* Controller Fine-tune Register */ ++ ++static const struct of_device_id aspeed_usb_hp_dt_ids[] = { ++ { ++ .compatible = "aspeed,ast2600-usb2ahp", ++ }, ++ { ++ .compatible = "aspeed,ast2700-usb3ahp", ++ }, ++ { ++ .compatible = "aspeed,ast2700-usb3bhp", ++ }, ++ { ++ .compatible = "aspeed,ast2700-usb2ahp", ++ }, ++ { ++ .compatible = "aspeed,ast2700-usb2bhp", ++ }, ++ {} ++}; ++MODULE_DEVICE_TABLE(of, aspeed_usb_hp_dt_ids); ++ ++static int aspeed_usb_hp_probe(struct platform_device *pdev) ++{ ++ struct device_node *node = pdev->dev.of_node; ++ void __iomem *regs; ++ bool ehci_32bits_quirk; ++ u32 val; ++ struct clk *clk; ++ struct reset_control *rst; ++ struct regmap *device; ++ struct phy *usb3_phy; ++ bool is_pcie_xhci; ++ int rc = 0; ++ ++ if (of_device_is_compatible(pdev->dev.of_node, ++ "aspeed,ast2600-usb2ahp")) { ++ dev_info(&pdev->dev, "Initialized AST2600 USB2AHP\n"); ++ return 0; ++ } ++ ++ if (of_device_is_compatible(pdev->dev.of_node, ++ "aspeed,ast2700-usb3ahp") || ++ of_device_is_compatible(pdev->dev.of_node, ++ "aspeed,ast2700-usb3bhp")) { ++ is_pcie_xhci = true; ++ } else if (of_device_is_compatible(pdev->dev.of_node, ++ "aspeed,ast2700-usb2ahp") || ++ of_device_is_compatible(pdev->dev.of_node, ++ "aspeed,ast2700-usb2bhp")) { ++ is_pcie_xhci = false; ++ } ++ clk = devm_clk_get(&pdev->dev, NULL); ++ if (IS_ERR(clk)) ++ return PTR_ERR(clk); ++ ++ rc = clk_prepare_enable(clk); ++ if (rc) { ++ dev_err(&pdev->dev, "Unable to enable clock (%d)\n", rc); ++ return rc; ++ } ++ ++ rst = devm_reset_control_get_optional_shared(&pdev->dev, NULL); ++ if (IS_ERR(rst)) { ++ rc = PTR_ERR(rst); ++ goto err; ++ } ++ rc = reset_control_deassert(rst); ++ if (rc) ++ goto err; ++ ++ device = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "aspeed,device"); ++ if (IS_ERR(device)) { ++ dev_err(&pdev->dev, "failed to find device regmap\n"); ++ goto err; ++ } ++ ++ if (is_pcie_xhci) { ++ usb3_phy = devm_phy_get(&pdev->dev, "usb3-phy"); ++ if (IS_ERR(usb3_phy)) { ++ rc = dev_err_probe(&pdev->dev, PTR_ERR(usb3_phy), ++ "failed to get usb3 phy\n"); ++ goto err; ++ } ++ rc = phy_init(usb3_phy); ++ if (rc < 0) { ++ dev_err(&pdev->dev, "failed to init usb3 phy\n"); ++ goto err; ++ } ++ //EnPCIaMSI_EnPCIaIntA_EnPCIaMst_EnPCIaDev ++ /* Turn on PCIe xHCI without MSI */ ++ regmap_update_bits(device, 0x70, ++ BIT(19) | BIT(11) | BIT(3), ++ BIT(19) | BIT(11) | BIT(3)); ++ } else { ++ ehci_32bits_quirk = ++ device_property_read_bool(&pdev->dev, "aspeed,ehci_32bits_quirk"); ++ ++ if (ehci_32bits_quirk) { ++ regs = of_iomap(node, 0); ++ val = readl(regs + USB_HP_BEHCI84) & ~BIT(11); ++ writel(val, regs + USB_HP_BEHCI84); ++ } ++ ++ //EnPCIaMSI_EnPCIaIntA_EnPCIaMst_EnPCIaDev ++ /* Turn on PCIe EHCI without MSI */ ++ regmap_update_bits(device, 0x70, ++ BIT(18) | BIT(10) | BIT(2), ++ BIT(18) | BIT(10) | BIT(2)); ++ } ++ dev_info(&pdev->dev, "Initialized AST2700 USB Host PCIe\n"); ++ return 0; ++err: ++ if (clk) ++ clk_disable_unprepare(clk); ++ return rc; ++} ++ ++static void aspeed_usb_hp_remove(struct platform_device *pdev) ++{ ++ dev_info(&pdev->dev, "Remove USB Host PCIe\n"); ++} ++ ++static struct platform_driver aspeed_usb_hp_driver = { ++ .probe = aspeed_usb_hp_probe, ++ .remove = aspeed_usb_hp_remove, ++ .driver = { ++ .name = KBUILD_MODNAME, ++ .of_match_table = aspeed_usb_hp_dt_ids, ++ }, ++}; ++module_platform_driver(aspeed_usb_hp_driver); 
++ ++MODULE_LICENSE("GPL"); ++MODULE_AUTHOR("Neal Liu "); +diff --git a/drivers/soc/aspeed/aspeed-usb-phy.c b/drivers/soc/aspeed/aspeed-usb-phy.c +--- a/drivers/soc/aspeed/aspeed-usb-phy.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/soc/aspeed/aspeed-usb-phy.c 2025-12-23 10:16:21.125032653 +0000 +@@ -0,0 +1,112 @@ ++// SPDX-License-Identifier: GPL-2.0+ ++/* ++ * Copyright 2021 Aspeed Technology Inc. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++struct usb_phy_ctrl { ++ u32 offset; ++ u32 value; ++}; ++ ++static const struct of_device_id aspeed_usb_phy_dt_ids[] = { ++ { ++ .compatible = "aspeed,ast2600-uphyb", ++ }, ++ { ++ .compatible = "aspeed,ast2700-uphy2a", ++ }, ++ { ++ .compatible = "aspeed,ast2700-uphy2b", ++ }, ++ { } ++}; ++MODULE_DEVICE_TABLE(of, aspeed_usb_phy_dt_ids); ++ ++static int aspeed_usb_phy_probe(struct platform_device *pdev) ++{ ++ struct device_node *node = pdev->dev.of_node; ++ struct usb_phy_ctrl *ctrl_data; ++ void __iomem *base; ++ struct regmap *scu; ++ int ctrl_num = 1; ++ int ret, i; ++ u32 val; ++ ++ scu = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "aspeed,scu"); ++ if (IS_ERR(scu)) { ++ dev_err(&pdev->dev, "cannot to find SCU regmap\n"); ++ return -ENODEV; ++ } ++ ++ if (of_device_is_compatible(pdev->dev.of_node, ++ "aspeed,ast2600-uphyb")) { ++ /* Check SCU040[3] USB port B controller reset is deassert */ ++ regmap_read(scu, 0x40, &val); ++ if ((val & BIT(3))) ++ return -EPROBE_DEFER; ++ } ++ ++ if (of_device_is_compatible(pdev->dev.of_node, ++ "aspeed,ast2700-uphy2a")) { ++ /* Check SCU220[0] USB vHubA1 controller reset is deassert */ ++ regmap_read(scu, 0x220, &val); ++ if ((val & BIT(0))) ++ return -EPROBE_DEFER; ++ } ++ ++ if (of_device_is_compatible(pdev->dev.of_node, ++ "aspeed,ast2700-uphy2b")) { ++ /* Check SCU220[3] USB vHubB1 controller reset is deassert */ ++ regmap_read(scu, 0x220, &val); ++ if ((val & BIT(3))) ++ return -EPROBE_DEFER; ++ } ++ ++ ctrl_data = devm_kzalloc(&pdev->dev, ++ sizeof(struct usb_phy_ctrl) * ctrl_num, ++ GFP_KERNEL); ++ if (!ctrl_data) ++ return -ENOMEM; ++ ++ base = of_iomap(node, 0); ++ ++ ret = of_property_read_u32_array(node, "ctrl", (u32 *)ctrl_data, ++ ctrl_num * 2); ++ if (ret < 0) { ++ dev_err(&pdev->dev, "Could not read ctrl property\n"); ++ return -EINVAL; ++ } ++ ++ for (i = 0; i < ctrl_num; i++) ++ writel(ctrl_data[i].value, base + ctrl_data[i].offset); ++ ++ dev_info(&pdev->dev, "Initialized USB PHY\n"); ++ ++ return 0; ++} ++ ++static void aspeed_usb_phy_remove(struct platform_device *pdev) ++{ ++} ++ ++static struct platform_driver aspeed_usb_phy_driver = { ++ .probe = aspeed_usb_phy_probe, ++ .remove = aspeed_usb_phy_remove, ++ .driver = { ++ .name = KBUILD_MODNAME, ++ .of_match_table = aspeed_usb_phy_dt_ids, ++ }, ++}; ++module_platform_driver(aspeed_usb_phy_driver); ++ ++MODULE_LICENSE("GPL"); ++MODULE_AUTHOR("Neal Liu "); +diff --git a/drivers/soc/aspeed/aspeed-xdma.c b/drivers/soc/aspeed/aspeed-xdma.c +--- a/drivers/soc/aspeed/aspeed-xdma.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/soc/aspeed/aspeed-xdma.c 2025-12-23 10:16:21.125032653 +0000 +@@ -0,0 +1,1438 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++// Copyright IBM Corp 2019 ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include 
++#include ++ ++#define DEVICE_NAME "aspeed-xdma" ++ ++#define SCU_AST2600_MISC_CTRL 0x0c0 ++#define SCU_AST2600_MISC_CTRL_XDMA_BMC BIT(8) ++#define SCU_AST2700_MISC_CTRL_XDMA_CLIENT BIT(4) ++ ++#define SCU_AST2600_DEBUG_CTRL 0x0c8 ++#define DEBUG_CTRL_AST2600_XDMA_DISABLE BIT(2) ++#define DEBUG_CTRL_AST2700_XDMA_DISABLE BIT(8) ++ ++#define SCU_AST2500_PCIE_CONF 0x180 ++#define SCU_AST2600_PCIE_CONF 0xc20 ++#define SCU_AST2700_PCIE0_CONF 0x970 ++#define SCU_AST2700_PCIE1_CONF 0x9B0 ++#define SCU_PCIE_CONF_VGA_EN BIT(0) ++#define SCU_PCIE_CONF_VGA_EN_MMIO BIT(1) ++#define SCU_PCIE_CONF_VGA_EN_LPC BIT(2) ++#define SCU_PCIE_CONF_VGA_EN_MSI BIT(3) ++#define SCU_PCIE_CONF_VGA_EN_MCTP BIT(4) ++#define SCU_PCIE_CONF_VGA_EN_IRQ BIT(5) ++#define SCU_PCIE_CONF_VGA_EN_DMA BIT(6) ++#define SCU_PCIE_CONF_BMC_EN BIT(8) ++#define SCU_PCIE_CONF_BMC_EN_MMIO BIT(9) ++#define SCU_PCIE_CONF_BMC_EN_MSI BIT(11) ++#define SCU_PCIE_CONF_BMC_EN_MCTP BIT(12) ++#define SCU_PCIE_CONF_BMC_EN_IRQ BIT(13) ++#define SCU_PCIE_CONF_BMC_EN_DMA BIT(14) ++ ++#define SCU_AST2700_PCIE0_CTRL 0xa60 ++#define SCU_AST2700_PCIE1_CTRL 0xae0 ++#define SCU_AST2700_PCIE_CTRL_DMA_EN BIT(2) ++ ++#define SCU_AST2500_BMC_CLASS_REV 0x19c ++#define SCU_AST2600_BMC_CLASS_REV 0xc68 ++#define SCU_AST2700_PCIE0_BMC_CLASS_REV 0xa18 ++#define SCU_AST2700_PCIE1_BMC_CLASS_REV 0xa98 ++#define SCU_BMC_CLASS_REV_XDMA 0xff000001 ++#define SCU_BMC_CLASS_REV_MASK 0xffffff00 ++ ++#define XDMA_CMDQ_SIZE PAGE_SIZE ++#define XDMA_NUM_CMDS \ ++ (XDMA_CMDQ_SIZE / sizeof(struct aspeed_xdma_cmd)) ++ ++/* Aspeed specification requires 100us after disabling the reset */ ++#define XDMA_ENGINE_SETUP_TIME_MAX_US 1000 ++#define XDMA_ENGINE_SETUP_TIME_MIN_US 100 ++ ++#define XDMA_CMD_AST2500_PITCH_SHIFT 3 ++#define XDMA_CMD_AST2500_PITCH_BMC GENMASK_ULL(62, 51) ++#define XDMA_CMD_AST2500_PITCH_HOST GENMASK_ULL(46, 35) ++#define XDMA_CMD_AST2500_PITCH_UPSTREAM BIT_ULL(31) ++#define XDMA_CMD_AST2500_PITCH_ADDR GENMASK_ULL(29, 4) ++#define XDMA_CMD_AST2500_PITCH_ID BIT_ULL(0) ++#define XDMA_CMD_AST2500_CMD_IRQ_EN BIT_ULL(31) ++#define XDMA_CMD_AST2500_CMD_LINE_NO GENMASK_ULL(27, 16) ++#define XDMA_CMD_AST2500_CMD_IRQ_BMC BIT_ULL(15) ++#define XDMA_CMD_AST2500_CMD_LINE_SIZE_SHIFT 4 ++#define XDMA_CMD_AST2500_CMD_LINE_SIZE \ ++ GENMASK_ULL(14, XDMA_CMD_AST2500_CMD_LINE_SIZE_SHIFT) ++#define XDMA_CMD_AST2500_CMD_ID BIT_ULL(1) ++ ++#define XDMA_CMD_AST2600_PITCH_BMC GENMASK_ULL(62, 48) ++#define XDMA_CMD_AST2600_PITCH_HOST GENMASK_ULL(46, 32) ++#define XDMA_CMD_AST2600_PITCH_ADDR GENMASK_ULL(30, 0) ++#define XDMA_CMD_AST2600_CMD_64_EN BIT_ULL(40) ++#define XDMA_CMD_AST2600_CMD_IRQ_BMC BIT_ULL(37) ++#define XDMA_CMD_AST2600_CMD_IRQ_HOST BIT_ULL(36) ++#define XDMA_CMD_AST2600_CMD_UPSTREAM BIT_ULL(32) ++#define XDMA_CMD_AST2600_CMD_LINE_NO GENMASK_ULL(27, 16) ++#define XDMA_CMD_AST2600_CMD_LINE_SIZE GENMASK_ULL(14, 0) ++#define XDMA_CMD_AST2600_CMD_MULTILINE_SIZE GENMASK_ULL(14, 12) ++ ++#define XDMA_CMD_AST2700_PITCH_BMC GENMASK_ULL(62, 48) ++#define XDMA_CMD_AST2700_PITCH_HOST GENMASK_ULL(46, 32) ++#define XDMA_CMD_AST2700_CMD_64_EN BIT_ULL(40) ++#define XDMA_CMD_AST2700_CMD_IRQ_BMC BIT_ULL(37) ++#define XDMA_CMD_AST2700_CMD_UPSTREAM BIT_ULL(32) ++#define XDMA_CMD_AST2700_CMD_LINE_NO GENMASK_ULL(27, 16) ++#define XDMA_CMD_AST2700_CMD_LINE_SIZE GENMASK_ULL(14, 0) ++#define XDMA_CMD_AST2700_CMD_MULTILINE_SIZE GENMASK_ULL(14, 12) ++#define XDMA_CMD_AST2700_BMC_ADDR GENMASK_ULL(34, 0) ++ ++#define XDMA_AST2500_QUEUE_ENTRY_SIZE 4 ++#define XDMA_AST2500_HOST_CMDQ_ADDR0 0x00 ++#define 
XDMA_AST2500_HOST_CMDQ_ENDP 0x04 ++#define XDMA_AST2500_HOST_CMDQ_WRITEP 0x08 ++#define XDMA_AST2500_HOST_CMDQ_READP 0x0c ++#define XDMA_AST2500_BMC_CMDQ_ADDR 0x10 ++#define XDMA_AST2500_BMC_CMDQ_ENDP 0x14 ++#define XDMA_AST2500_BMC_CMDQ_WRITEP 0x18 ++#define XDMA_AST2500_BMC_CMDQ_READP 0x1c ++#define XDMA_BMC_CMDQ_READP_RESET 0xee882266 ++#define XDMA_AST2500_CTRL 0x20 ++#define XDMA_AST2500_CTRL_US_COMP BIT(4) ++#define XDMA_AST2500_CTRL_DS_COMP BIT(5) ++#define XDMA_AST2500_CTRL_DS_DIRTY BIT(6) ++#define XDMA_AST2500_CTRL_DS_SIZE_256 BIT(17) ++#define XDMA_AST2500_CTRL_DS_TIMEOUT BIT(28) ++#define XDMA_AST2500_CTRL_DS_CHECK_ID BIT(29) ++#define XDMA_AST2500_STATUS 0x24 ++#define XDMA_AST2500_STATUS_US_COMP BIT(4) ++#define XDMA_AST2500_STATUS_DS_COMP BIT(5) ++#define XDMA_AST2500_STATUS_DS_DIRTY BIT(6) ++#define XDMA_AST2500_INPRG_DS_CMD1 0x38 ++#define XDMA_AST2500_INPRG_DS_CMD2 0x3c ++#define XDMA_AST2500_INPRG_US_CMD00 0x40 ++#define XDMA_AST2500_INPRG_US_CMD01 0x44 ++#define XDMA_AST2500_INPRG_US_CMD10 0x48 ++#define XDMA_AST2500_INPRG_US_CMD11 0x4c ++#define XDMA_AST2500_INPRG_US_CMD20 0x50 ++#define XDMA_AST2500_INPRG_US_CMD21 0x54 ++#define XDMA_AST2500_HOST_CMDQ_ADDR1 0x60 ++#define XDMA_AST2500_VGA_CMDQ_ADDR0 0x64 ++#define XDMA_AST2500_VGA_CMDQ_ENDP 0x68 ++#define XDMA_AST2500_VGA_CMDQ_WRITEP 0x6c ++#define XDMA_AST2500_VGA_CMDQ_READP 0x70 ++#define XDMA_AST2500_VGA_CMD_STATUS 0x74 ++#define XDMA_AST2500_VGA_CMDQ_ADDR1 0x78 ++ ++#define XDMA_AST2600_QUEUE_ENTRY_SIZE 2 ++#define XDMA_AST2600_HOST_CMDQ_ADDR0 0x00 ++#define XDMA_AST2600_HOST_CMDQ_ADDR1 0x04 ++#define XDMA_AST2600_HOST_CMDQ_ENDP 0x08 ++#define XDMA_AST2600_HOST_CMDQ_WRITEP 0x0c ++#define XDMA_AST2600_HOST_CMDQ_READP 0x10 ++#define XDMA_AST2600_BMC_CMDQ_ADDR 0x14 ++#define XDMA_AST2600_BMC_CMDQ_ENDP 0x18 ++#define XDMA_AST2600_BMC_CMDQ_WRITEP 0x1c ++#define XDMA_AST2600_BMC_CMDQ_READP 0x20 ++#define XDMA_AST2600_VGA_CMDQ_ADDR0 0x24 ++#define XDMA_AST2600_VGA_CMDQ_ADDR1 0x28 ++#define XDMA_AST2600_VGA_CMDQ_ENDP 0x2c ++#define XDMA_AST2600_VGA_CMDQ_WRITEP 0x30 ++#define XDMA_AST2600_VGA_CMDQ_READP 0x34 ++#define XDMA_AST2600_CTRL 0x38 ++#define XDMA_AST2600_CTRL_US_COMP BIT(16) ++#define XDMA_AST2600_CTRL_DS_COMP BIT(17) ++#define XDMA_AST2600_CTRL_DS_DIRTY BIT(18) ++#define XDMA_AST2600_CTRL_DS_SIZE_256 BIT(20) ++#define XDMA_AST2600_STATUS 0x3c ++#define XDMA_AST2600_STATUS_US_COMP BIT(16) ++#define XDMA_AST2600_STATUS_DS_COMP BIT(17) ++#define XDMA_AST2600_STATUS_DS_DIRTY BIT(18) ++#define XDMA_AST2600_INPRG_DS_CMD00 0x40 ++#define XDMA_AST2600_INPRG_DS_CMD01 0x44 ++#define XDMA_AST2600_INPRG_DS_CMD10 0x48 ++#define XDMA_AST2600_INPRG_DS_CMD11 0x4c ++#define XDMA_AST2600_INPRG_DS_CMD20 0x50 ++#define XDMA_AST2600_INPRG_DS_CMD21 0x54 ++#define XDMA_AST2600_INPRG_US_CMD00 0x60 ++#define XDMA_AST2600_INPRG_US_CMD01 0x64 ++#define XDMA_AST2600_INPRG_US_CMD10 0x68 ++#define XDMA_AST2600_INPRG_US_CMD11 0x6c ++#define XDMA_AST2600_INPRG_US_CMD20 0x70 ++#define XDMA_AST2600_INPRG_US_CMD21 0x74 ++ ++#define XDMA_AST2700_QUEUE_ENTRY_SIZE 2 ++#define XDMA_AST2700_BMC_CMDQ_ADDR0 0x10 ++#define XDMA_AST2700_BMC_CMDQ_ADDR1 0x14 ++#define XDMA_AST2700_BMC_CMDQ_ENDP 0x18 ++#define XDMA_AST2700_BMC_CMDQ_WRITEP 0x1c ++#define XDMA_AST2700_BMC_CMDQ_READP 0x20 ++#define XDMA_AST2700_CTRL 0x38 ++#define XDMA_AST2700_CTRL_US_COMP BIT(16) ++#define XDMA_AST2700_CTRL_DS_COMP BIT(17) ++#define XDMA_AST2700_CTRL_DS_DIRTY BIT(18) ++#define XDMA_AST2700_CTRL_IDLE BIT(19) ++#define XDMA_AST2700_CTRL_DS_SIZE_256 BIT(20) ++#define 
XDMA_AST2700_STATUS 0x3c ++#define XDMA_AST2700_STATUS_US_COMP BIT(16) ++#define XDMA_AST2700_STATUS_DS_COMP BIT(17) ++#define XDMA_AST2700_STATUS_DS_DIRTY BIT(18) ++#define XDMA_AST2700_STATUS_IDLE BIT(19) ++#define XDMA_AST2700_INPRG_DS_CMD00 0x40 ++#define XDMA_AST2700_INPRG_DS_CMD01 0x44 ++#define XDMA_AST2700_INPRG_DS_CMD10 0x48 ++#define XDMA_AST2700_INPRG_DS_CMD11 0x4c ++#define XDMA_AST2700_INPRG_DS_CMD20 0x50 ++#define XDMA_AST2700_INPRG_DS_CMD21 0x54 ++#define XDMA_AST2700_INPRG_US_CMD00 0x60 ++#define XDMA_AST2700_INPRG_US_CMD01 0x64 ++#define XDMA_AST2700_INPRG_US_CMD10 0x68 ++#define XDMA_AST2700_INPRG_US_CMD11 0x6c ++#define XDMA_AST2700_INPRG_US_CMD20 0x70 ++#define XDMA_AST2700_INPRG_US_CMD21 0x74 ++ ++struct aspeed_xdma_cmd { ++ u64 host_addr; ++ u64 pitch; ++ u64 cmd; ++ u64 reserved; ++}; ++ ++struct aspeed_xdma_regs { ++ u8 bmc_cmdq_addr; ++ u8 bmc_cmdq_addr_ext; ++ u8 bmc_cmdq_endp; ++ u8 bmc_cmdq_writep; ++ u8 bmc_cmdq_readp; ++ u8 control; ++ u8 status; ++}; ++ ++struct aspeed_xdma_status_bits { ++ u32 us_comp; ++ u32 ds_comp; ++ u32 ds_dirty; ++}; ++ ++struct aspeed_xdma; ++ ++struct aspeed_xdma_chip { ++ u32 control; ++ u32 scu_bmc_class; ++ u32 scu_misc_ctrl; ++ u32 scu_misc_mask; ++ u32 scu_disable_mask; ++ u32 scu_pcie_conf; ++ u32 scu_pcie_ctrl; ++ unsigned int queue_entry_size; ++ struct aspeed_xdma_regs regs; ++ struct aspeed_xdma_status_bits status_bits; ++ unsigned int (*set_cmd)(struct aspeed_xdma *ctx, ++ struct aspeed_xdma_cmd cmds[2], ++ struct aspeed_xdma_op *op, u64 bmc_addr); ++}; ++ ++struct aspeed_xdma_client; ++ ++struct aspeed_xdma { ++ struct kobject kobj; ++ const struct aspeed_xdma_chip *chip; ++ ++ int irq; ++ int pcie_irq; ++ struct clk *clock; ++ struct device *dev; ++ void __iomem *base; ++ resource_size_t res_size; ++ resource_size_t res_start; ++ struct reset_control *reset; ++ struct reset_control *reset_rc; ++ ++ /* Protects current_client */ ++ spinlock_t client_lock; ++ struct aspeed_xdma_client *current_client; ++ ++ /* Protects engine configuration */ ++ spinlock_t engine_lock; ++ struct aspeed_xdma_cmd *cmdq; ++ unsigned int cmd_idx; ++ bool in_reset; ++ bool upstream; ++ ++ /* Queue waiters for idle engine */ ++ wait_queue_head_t wait; ++ ++ struct work_struct reset_work; ++ ++ phys_addr_t mem_phys; ++ phys_addr_t mem_size; ++ void *mem_virt; ++ dma_addr_t mem_coherent; ++ dma_addr_t cmdq_phys; ++ struct gen_pool *pool; ++ ++ struct miscdevice misc; ++}; ++ ++struct aspeed_xdma_client { ++ struct aspeed_xdma *ctx; ++ ++ bool error; ++ bool in_progress; ++ void *virt; ++ dma_addr_t phys; ++ u32 size; ++}; ++ ++#define CREATE_TRACE_POINTS ++#include ++ ++static u32 aspeed_xdma_readl(struct aspeed_xdma *ctx, u8 reg) ++{ ++ u32 v = readl(ctx->base + reg); ++ ++ dev_dbg(ctx->dev, "read %02x[%08x]\n", reg, v); ++ return v; ++} ++ ++static void aspeed_xdma_writel(struct aspeed_xdma *ctx, u8 reg, u32 val) ++{ ++ writel(val, ctx->base + reg); ++ dev_dbg(ctx->dev, "write %02x[%08x]\n", reg, val); ++} ++ ++static void aspeed_xdma_init_eng(struct aspeed_xdma *ctx) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&ctx->engine_lock, flags); ++ aspeed_xdma_writel(ctx, ctx->chip->regs.bmc_cmdq_endp, ++ ctx->chip->queue_entry_size * XDMA_NUM_CMDS); ++ aspeed_xdma_writel(ctx, ctx->chip->regs.bmc_cmdq_readp, ++ XDMA_BMC_CMDQ_READP_RESET); ++ aspeed_xdma_writel(ctx, ctx->chip->regs.bmc_cmdq_writep, 0); ++ aspeed_xdma_writel(ctx, ctx->chip->regs.control, ctx->chip->control); ++ aspeed_xdma_writel(ctx, ctx->chip->regs.bmc_cmdq_addr, 
ctx->cmdq_phys); ++#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT ++ if (ctx->chip->regs.bmc_cmdq_addr_ext) ++ aspeed_xdma_writel(ctx, ctx->chip->regs.bmc_cmdq_addr_ext, ctx->cmdq_phys >> 32); ++#endif ++ ++ ctx->cmd_idx = 0; ++ spin_unlock_irqrestore(&ctx->engine_lock, flags); ++} ++ ++static unsigned int aspeed_xdma_ast2500_set_cmd(struct aspeed_xdma *ctx, ++ struct aspeed_xdma_cmd cmds[2], ++ struct aspeed_xdma_op *op, ++ u64 bmc_addr) ++{ ++ unsigned int rc = 1; ++ unsigned int pitch = 1; ++ unsigned int line_no = 1; ++ unsigned int line_size = op->len >> ++ XDMA_CMD_AST2500_CMD_LINE_SIZE_SHIFT; ++ u64 cmd = XDMA_CMD_AST2500_CMD_IRQ_EN | XDMA_CMD_AST2500_CMD_IRQ_BMC | ++ XDMA_CMD_AST2500_CMD_ID; ++ u64 cmd_pitch = (op->direction ? XDMA_CMD_AST2500_PITCH_UPSTREAM : 0) | ++ XDMA_CMD_AST2500_PITCH_ID; ++ ++ dev_dbg(ctx->dev, "xdma %s ast2500: bmc[%08llx] len[%08x] host[%08x]\n", ++ op->direction ? "upstream" : "downstream", bmc_addr, op->len, ++ (u32)op->host_addr); ++ ++ if (op->len > XDMA_CMD_AST2500_CMD_LINE_SIZE) { ++ unsigned int rem; ++ unsigned int total; ++ ++ line_no = op->len / XDMA_CMD_AST2500_CMD_LINE_SIZE; ++ total = XDMA_CMD_AST2500_CMD_LINE_SIZE * line_no; ++ rem = (op->len - total) >> ++ XDMA_CMD_AST2500_CMD_LINE_SIZE_SHIFT; ++ line_size = XDMA_CMD_AST2500_CMD_LINE_SIZE; ++ pitch = line_size >> XDMA_CMD_AST2500_PITCH_SHIFT; ++ line_size >>= XDMA_CMD_AST2500_CMD_LINE_SIZE_SHIFT; ++ ++ if (rem) { ++ u32 rbmc = bmc_addr + total; ++ ++ cmds[1].host_addr = op->host_addr + (u64)total; ++ cmds[1].pitch = cmd_pitch | ++ ((u64)rbmc & XDMA_CMD_AST2500_PITCH_ADDR) | ++ FIELD_PREP(XDMA_CMD_AST2500_PITCH_HOST, 1) | ++ FIELD_PREP(XDMA_CMD_AST2500_PITCH_BMC, 1); ++ cmds[1].cmd = cmd | ++ FIELD_PREP(XDMA_CMD_AST2500_CMD_LINE_NO, 1) | ++ FIELD_PREP(XDMA_CMD_AST2500_CMD_LINE_SIZE, ++ rem); ++ cmds[1].reserved = 0ULL; ++ ++ print_hex_dump_debug("xdma rem ", DUMP_PREFIX_OFFSET, ++ 16, 1, &cmds[1], sizeof(*cmds), ++ true); ++ ++ cmd &= ~(XDMA_CMD_AST2500_CMD_IRQ_EN | ++ XDMA_CMD_AST2500_CMD_IRQ_BMC); ++ ++ rc++; ++ } ++ } ++ ++ cmds[0].host_addr = op->host_addr; ++ cmds[0].pitch = cmd_pitch | ++ (bmc_addr & XDMA_CMD_AST2500_PITCH_ADDR) | ++ FIELD_PREP(XDMA_CMD_AST2500_PITCH_HOST, pitch) | ++ FIELD_PREP(XDMA_CMD_AST2500_PITCH_BMC, pitch); ++ cmds[0].cmd = cmd | FIELD_PREP(XDMA_CMD_AST2500_CMD_LINE_NO, line_no) | ++ FIELD_PREP(XDMA_CMD_AST2500_CMD_LINE_SIZE, line_size); ++ cmds[0].reserved = 0ULL; ++ ++ print_hex_dump_debug("xdma cmd ", DUMP_PREFIX_OFFSET, 16, 1, cmds, ++ sizeof(*cmds), true); ++ ++ return rc; ++} ++ ++static unsigned int aspeed_xdma_ast2600_set_cmd(struct aspeed_xdma *ctx, ++ struct aspeed_xdma_cmd cmds[2], ++ struct aspeed_xdma_op *op, ++ u64 bmc_addr) ++{ ++ unsigned int rc = 1; ++ unsigned int pitch = 1; ++ unsigned int line_no = 1; ++ unsigned int line_size = op->len; ++ u64 cmd = XDMA_CMD_AST2600_CMD_IRQ_BMC | ++ (op->direction ? XDMA_CMD_AST2600_CMD_UPSTREAM : 0); ++ ++ if (op->host_addr & 0xffffffff00000000ULL || ++ (op->host_addr + (u64)op->len) & 0xffffffff00000000ULL) ++ cmd |= XDMA_CMD_AST2600_CMD_64_EN; ++ ++ dev_dbg(ctx->dev, "xdma %s ast2600: bmc[%08llx] len[%08x] host[%016llx]\n", ++ op->direction ? 
"upstream" : "downstream", ++ bmc_addr, op->len, op->host_addr); ++ ++ if ((op->host_addr & 0xff) + op->len > XDMA_CMD_AST2600_CMD_LINE_SIZE) { ++ unsigned int rem; ++ unsigned int total; ++ ++ line_no = op->len / XDMA_CMD_AST2600_CMD_MULTILINE_SIZE; ++ total = XDMA_CMD_AST2600_CMD_MULTILINE_SIZE * line_no; ++ rem = op->len - total; ++ line_size = XDMA_CMD_AST2600_CMD_MULTILINE_SIZE; ++ pitch = line_size; ++ ++ if (rem) { ++ u32 rbmc = bmc_addr + total; ++ ++ cmds[1].host_addr = op->host_addr + (u64)total; ++ cmds[1].pitch = ++ ((u64)rbmc & XDMA_CMD_AST2600_PITCH_ADDR) | ++ FIELD_PREP(XDMA_CMD_AST2600_PITCH_HOST, 1) | ++ FIELD_PREP(XDMA_CMD_AST2600_PITCH_BMC, 1); ++ cmds[1].cmd = cmd | ++ FIELD_PREP(XDMA_CMD_AST2600_CMD_LINE_NO, 1) | ++ FIELD_PREP(XDMA_CMD_AST2600_CMD_LINE_SIZE, ++ rem); ++ cmds[1].reserved = 0ULL; ++ ++ print_hex_dump_debug("xdma rem ", DUMP_PREFIX_OFFSET, ++ 16, 1, &cmds[1], sizeof(*cmds), ++ true); ++ ++ cmd &= ~XDMA_CMD_AST2600_CMD_IRQ_BMC; ++ ++ rc++; ++ } ++ } ++ ++ cmds[0].host_addr = op->host_addr; ++ cmds[0].pitch = (bmc_addr & XDMA_CMD_AST2600_PITCH_ADDR) | ++ FIELD_PREP(XDMA_CMD_AST2600_PITCH_HOST, pitch) | ++ FIELD_PREP(XDMA_CMD_AST2600_PITCH_BMC, pitch); ++ cmds[0].cmd = cmd | FIELD_PREP(XDMA_CMD_AST2600_CMD_LINE_NO, line_no) | ++ FIELD_PREP(XDMA_CMD_AST2600_CMD_LINE_SIZE, line_size); ++ cmds[0].reserved = 0ULL; ++ ++ print_hex_dump_debug("xdma cmd ", DUMP_PREFIX_OFFSET, 16, 1, cmds, ++ sizeof(*cmds), true); ++ ++ return rc; ++} ++ ++static unsigned int aspeed_xdma_ast2700_set_cmd(struct aspeed_xdma *ctx, ++ struct aspeed_xdma_cmd cmds[2], ++ struct aspeed_xdma_op *op, ++ u64 bmc_addr) ++{ ++ unsigned int rc = 1; ++ unsigned int pitch = 1; ++ unsigned int line_no = 1; ++ unsigned int line_size = op->len; ++ u64 cmd = XDMA_CMD_AST2700_CMD_IRQ_BMC | ++ (op->direction ? XDMA_CMD_AST2700_CMD_UPSTREAM : 0); ++ ++ if (op->host_addr & 0xffffffff00000000ULL || ++ (op->host_addr + (u64)op->len) & 0xffffffff00000000ULL) ++ cmd |= XDMA_CMD_AST2700_CMD_64_EN; ++ ++ dev_dbg(ctx->dev, "xdma %s ast2700: bmc[%016llx] len[%08x] host[%016llx]\n", ++ op->direction ? 
"upstream" : "downstream", ++ bmc_addr, op->len, op->host_addr); ++ ++ if (op->len > XDMA_CMD_AST2700_CMD_LINE_SIZE) { ++ unsigned int rem; ++ unsigned int total; ++ ++ line_no = op->len / XDMA_CMD_AST2700_CMD_MULTILINE_SIZE; ++ total = XDMA_CMD_AST2700_CMD_MULTILINE_SIZE * line_no; ++ rem = op->len - total; ++ line_size = XDMA_CMD_AST2700_CMD_MULTILINE_SIZE; ++ pitch = line_size; ++ ++ if (rem) { ++ u64 rbmc = bmc_addr + total; ++ ++ cmds[1].host_addr = op->host_addr + (u64)total; ++ cmds[1].pitch = ++ FIELD_PREP(XDMA_CMD_AST2700_PITCH_HOST, 1) | ++ FIELD_PREP(XDMA_CMD_AST2700_PITCH_BMC, 1); ++ cmds[1].cmd = cmd | ++ FIELD_PREP(XDMA_CMD_AST2700_CMD_LINE_NO, 1) | ++ FIELD_PREP(XDMA_CMD_AST2700_CMD_LINE_SIZE, ++ rem); ++ cmds[1].reserved = rbmc & XDMA_CMD_AST2700_BMC_ADDR; ++ ++ print_hex_dump_debug("xdma rem ", DUMP_PREFIX_OFFSET, ++ 16, 1, &cmds[1], sizeof(*cmds), ++ true); ++ ++ cmd &= ~XDMA_CMD_AST2700_CMD_IRQ_BMC; ++ ++ rc++; ++ } ++ } ++ cmds[0].host_addr = op->host_addr; ++ cmds[0].pitch = ++ FIELD_PREP(XDMA_CMD_AST2700_PITCH_HOST, pitch) | ++ FIELD_PREP(XDMA_CMD_AST2700_PITCH_BMC, pitch); ++ cmds[0].cmd = cmd | FIELD_PREP(XDMA_CMD_AST2700_CMD_LINE_NO, line_no) | ++ FIELD_PREP(XDMA_CMD_AST2700_CMD_LINE_SIZE, line_size); ++ cmds[0].reserved = bmc_addr & XDMA_CMD_AST2700_BMC_ADDR; ++ ++ print_hex_dump_debug("xdma cmd ", DUMP_PREFIX_OFFSET, 16, 1, cmds, ++ sizeof(*cmds), true); ++ ++ return rc; ++} ++ ++static int aspeed_xdma_start(struct aspeed_xdma *ctx, unsigned int num_cmds, ++ struct aspeed_xdma_cmd cmds[2], bool upstream, ++ struct aspeed_xdma_client *client) ++{ ++ unsigned int i; ++ int rc = -EBUSY; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&ctx->engine_lock, flags); ++ if (ctx->in_reset) ++ goto unlock; ++ ++ spin_lock(&ctx->client_lock); ++ if (ctx->current_client) { ++ spin_unlock(&ctx->client_lock); ++ goto unlock; ++ } ++ ++ client->error = false; ++ client->in_progress = true; ++ ctx->current_client = client; ++ spin_unlock(&ctx->client_lock); ++ ++ ctx->upstream = upstream; ++ for (i = 0; i < num_cmds; ++i) { ++ trace_xdma_start(ctx, &cmds[i]); ++ /* ++ * Use memcpy_toio here to get some barriers before starting ++ * the operation. The command(s) need to be in physical memory ++ * before the XDMA engine starts. 
++ */ ++ memcpy_toio(&ctx->cmdq[ctx->cmd_idx], &cmds[i], ++ sizeof(struct aspeed_xdma_cmd)); ++ ctx->cmd_idx = (ctx->cmd_idx + 1) % XDMA_NUM_CMDS; ++ } ++ ++ aspeed_xdma_writel(ctx, ctx->chip->regs.bmc_cmdq_writep, ++ ctx->cmd_idx * ctx->chip->queue_entry_size); ++ rc = 0; ++ ++unlock: ++ spin_unlock_irqrestore(&ctx->engine_lock, flags); ++ return rc; ++} ++ ++static void aspeed_xdma_done(struct aspeed_xdma *ctx, bool error) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&ctx->client_lock, flags); ++ if (ctx->current_client) { ++ ctx->current_client->error = error; ++ ctx->current_client->in_progress = false; ++ ctx->current_client = NULL; ++ } ++ spin_unlock_irqrestore(&ctx->client_lock, flags); ++ ++ wake_up_interruptible_all(&ctx->wait); ++} ++ ++static irqreturn_t aspeed_xdma_irq(int irq, void *arg) ++{ ++ struct aspeed_xdma *ctx = arg; ++ u32 status; ++ ++ spin_lock(&ctx->engine_lock); ++ status = aspeed_xdma_readl(ctx, ctx->chip->regs.status); ++ ++ trace_xdma_irq(status); ++ ++ if (status & ctx->chip->status_bits.ds_dirty) { ++ aspeed_xdma_done(ctx, true); ++ } else { ++ if (status & ctx->chip->status_bits.us_comp) { ++ if (ctx->upstream) ++ aspeed_xdma_done(ctx, false); ++ } ++ ++ if (status & ctx->chip->status_bits.ds_comp) { ++ if (!ctx->upstream) ++ aspeed_xdma_done(ctx, false); ++ } ++ } ++ ++ aspeed_xdma_writel(ctx, ctx->chip->regs.status, status); ++ spin_unlock(&ctx->engine_lock); ++ ++ return IRQ_HANDLED; ++} ++ ++static void aspeed_xdma_reset(struct aspeed_xdma *ctx) ++{ ++ unsigned long flags; ++ ++ trace_xdma_reset(ctx); ++ ++ reset_control_assert(ctx->reset); ++ usleep_range(XDMA_ENGINE_SETUP_TIME_MIN_US, ++ XDMA_ENGINE_SETUP_TIME_MAX_US); ++ reset_control_deassert(ctx->reset); ++ usleep_range(XDMA_ENGINE_SETUP_TIME_MIN_US, ++ XDMA_ENGINE_SETUP_TIME_MAX_US); ++ ++ aspeed_xdma_init_eng(ctx); ++ ++ aspeed_xdma_done(ctx, true); ++ ++ spin_lock_irqsave(&ctx->engine_lock, flags); ++ ctx->in_reset = false; ++ spin_unlock_irqrestore(&ctx->engine_lock, flags); ++ ++ wake_up_interruptible(&ctx->wait); ++} ++ ++static void aspeed_xdma_reset_work(struct work_struct *work) ++{ ++ struct aspeed_xdma *ctx = container_of(work, struct aspeed_xdma, ++ reset_work); ++ ++ aspeed_xdma_reset(ctx); ++} ++ ++static irqreturn_t aspeed_xdma_pcie_irq(int irq, void *arg) ++{ ++ struct aspeed_xdma *ctx = arg; ++ ++ trace_xdma_perst(ctx); ++ ++ spin_lock(&ctx->engine_lock); ++ if (ctx->in_reset) { ++ spin_unlock(&ctx->engine_lock); ++ return IRQ_HANDLED; ++ } ++ ++ ctx->in_reset = true; ++ spin_unlock(&ctx->engine_lock); ++ ++ schedule_work(&ctx->reset_work); ++ return IRQ_HANDLED; ++} ++ ++static ssize_t aspeed_xdma_write(struct file *file, const char __user *buf, ++ size_t len, loff_t *offset) ++{ ++ int rc; ++ unsigned int num_cmds; ++ struct aspeed_xdma_op op; ++ struct aspeed_xdma_cmd cmds[2]; ++ struct aspeed_xdma_client *client = file->private_data; ++ struct aspeed_xdma *ctx = client->ctx; ++ ++ if (len != sizeof(op)) ++ return -EINVAL; ++ ++ if (READ_ONCE(client->in_progress)) ++ return -EBUSY; ++ ++ if (copy_from_user(&op, buf, len)) ++ return -EFAULT; ++ ++ if (!op.len || op.len > client->size || ++ op.direction > ASPEED_XDMA_DIRECTION_UPSTREAM) ++ return -EINVAL; ++ ++ num_cmds = ctx->chip->set_cmd(ctx, cmds, &op, client->phys); ++ do { ++ rc = aspeed_xdma_start(ctx, num_cmds, cmds, !!op.direction, ++ client); ++ if (!rc) ++ break; ++ ++ if ((file->f_flags & O_NONBLOCK) || rc != -EBUSY) ++ return rc; ++ ++ rc = wait_event_interruptible(ctx->wait, ++ !(ctx->current_client || ++ 
ctx->in_reset)); ++ } while (!rc); ++ ++ if (rc) ++ return -EINTR; ++ ++ if (!(file->f_flags & O_NONBLOCK)) { ++ rc = wait_event_interruptible(ctx->wait, !client->in_progress); ++ if (rc) ++ return -EINTR; ++ ++ if (client->error) ++ return -EIO; ++ } ++ ++ return len; ++} ++ ++static __poll_t aspeed_xdma_poll(struct file *file, ++ struct poll_table_struct *wait) ++{ ++ __poll_t mask = 0; ++ __poll_t req = poll_requested_events(wait); ++ struct aspeed_xdma_client *client = file->private_data; ++ struct aspeed_xdma *ctx = client->ctx; ++ ++ if (req & (EPOLLIN | EPOLLRDNORM)) { ++ if (READ_ONCE(client->in_progress)) ++ poll_wait(file, &ctx->wait, wait); ++ ++ if (!READ_ONCE(client->in_progress)) { ++ if (READ_ONCE(client->error)) ++ mask |= EPOLLERR; ++ else ++ mask |= EPOLLIN | EPOLLRDNORM; ++ } ++ } ++ ++ if (req & (EPOLLOUT | EPOLLWRNORM)) { ++ if (READ_ONCE(ctx->current_client)) ++ poll_wait(file, &ctx->wait, wait); ++ ++ if (!READ_ONCE(ctx->current_client)) ++ mask |= EPOLLOUT | EPOLLWRNORM; ++ } ++ ++ if (mask) ++ aspeed_xdma_reset(ctx); ++ ++ return mask; ++} ++ ++static long aspeed_xdma_ioctl(struct file *file, unsigned int cmd, ++ unsigned long param) ++{ ++ unsigned long flags; ++ struct aspeed_xdma_client *client = file->private_data; ++ struct aspeed_xdma *ctx = client->ctx; ++ ++ switch (cmd) { ++ case ASPEED_XDMA_IOCTL_RESET: ++ spin_lock_irqsave(&ctx->engine_lock, flags); ++ if (ctx->in_reset) { ++ spin_unlock_irqrestore(&ctx->engine_lock, flags); ++ return 0; ++ } ++ ++ ctx->in_reset = true; ++ spin_unlock_irqrestore(&ctx->engine_lock, flags); ++ ++ if (READ_ONCE(ctx->current_client)) ++ dev_warn(ctx->dev, ++ "User reset with transfer in progress.\n"); ++ ++ aspeed_xdma_reset(ctx); ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static void aspeed_xdma_vma_close(struct vm_area_struct *vma) ++{ ++ int rc; ++ struct aspeed_xdma_client *client = vma->vm_private_data; ++ ++ rc = wait_event_interruptible(client->ctx->wait, !client->in_progress); ++ if (rc) ++ return; ++ ++ gen_pool_free(client->ctx->pool, (unsigned long)client->virt, ++ client->size); ++ trace_xdma_unmap(client); ++ ++ client->virt = NULL; ++ client->phys = 0; ++ client->size = 0; ++} ++ ++static const struct vm_operations_struct aspeed_xdma_vm_ops = { ++ .close = aspeed_xdma_vma_close, ++}; ++ ++static int aspeed_xdma_mmap(struct file *file, struct vm_area_struct *vma) ++{ ++ int rc; ++ struct aspeed_xdma_client *client = file->private_data; ++ struct aspeed_xdma *ctx = client->ctx; ++ ++ /* restrict file to one mapping */ ++ if (client->size) ++ return -EBUSY; ++ ++ client->size = vma->vm_end - vma->vm_start; ++ client->virt = gen_pool_dma_alloc(ctx->pool, client->size, ++ &client->phys); ++ if (!client->virt) { ++ trace_xdma_mmap_error(client, 0UL); ++ client->phys = 0; ++ client->size = 0; ++ return -ENOMEM; ++ } ++ ++ vma->vm_pgoff = (client->phys - ctx->mem_phys) >> PAGE_SHIFT; ++ vma->vm_ops = &aspeed_xdma_vm_ops; ++ vma->vm_private_data = client; ++ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); ++ ++ rc = io_remap_pfn_range(vma, vma->vm_start, client->phys >> PAGE_SHIFT, ++ client->size, vma->vm_page_prot); ++ if (rc) { ++ dev_warn(ctx->dev, "mmap err: v[%08lx] to p[%pa], s[%08x]\n", ++ vma->vm_start, &client->phys, client->size); ++ ++ gen_pool_free(ctx->pool, (unsigned long)client->virt, ++ client->size); ++ ++ trace_xdma_mmap_error(client, vma->vm_start); ++ client->virt = NULL; ++ client->phys = 0; ++ client->size = 0; ++ return rc; ++ } ++ ++ trace_xdma_mmap(client); 
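++ /*
++ * client->phys now points into the reserved memory region backing this
++ * mapping; later write() calls pass it to the chip's set_cmd() hook as
++ * the BMC-side address of the transfer.
++ */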
++ dev_dbg(ctx->dev, "mmap: v[%08lx] to p[%pa], s[%08x]\n", ++ vma->vm_start, &client->phys, client->size); ++ ++ return 0; ++} ++ ++static int aspeed_xdma_open(struct inode *inode, struct file *file) ++{ ++ struct miscdevice *misc = file->private_data; ++ struct aspeed_xdma *ctx = container_of(misc, struct aspeed_xdma, misc); ++ struct aspeed_xdma_client *client = kzalloc(sizeof(*client), ++ GFP_KERNEL); ++ ++ if (!client) ++ return -ENOMEM; ++ ++ kobject_get(&ctx->kobj); ++ client->ctx = ctx; ++ file->private_data = client; ++ return 0; ++} ++ ++static int aspeed_xdma_release(struct inode *inode, struct file *file) ++{ ++ bool reset = false; ++ unsigned long flags; ++ struct aspeed_xdma_client *client = file->private_data; ++ struct aspeed_xdma *ctx = client->ctx; ++ ++ spin_lock_irqsave(&ctx->client_lock, flags); ++ if (client == ctx->current_client) { ++ spin_lock(&ctx->engine_lock); ++ if (ctx->in_reset) { ++ ctx->current_client = NULL; ++ } else { ++ ctx->in_reset = true; ++ reset = true; ++ } ++ spin_unlock(&ctx->engine_lock); ++ } ++ spin_unlock_irqrestore(&ctx->client_lock, flags); ++ ++ if (reset) ++ aspeed_xdma_reset(ctx); ++ ++ if (client->virt) { ++ gen_pool_free(ctx->pool, (unsigned long)client->virt, ++ client->size); ++ trace_xdma_unmap(client); ++ } ++ ++ kfree(client); ++ kobject_put(&ctx->kobj); ++ return 0; ++} ++ ++static const struct file_operations aspeed_xdma_fops = { ++ .owner = THIS_MODULE, ++ .write = aspeed_xdma_write, ++ .poll = aspeed_xdma_poll, ++ .unlocked_ioctl = aspeed_xdma_ioctl, ++ .mmap = aspeed_xdma_mmap, ++ .open = aspeed_xdma_open, ++ .release = aspeed_xdma_release, ++}; ++ ++static int aspeed_xdma_init_scu(struct aspeed_xdma *ctx, struct device *dev) ++{ ++ struct regmap *scu = syscon_regmap_lookup_by_phandle(dev->of_node, ++ "aspeed,scu"); ++ ++ if (!IS_ERR(scu)) { ++ u32 selection; ++ bool pcie_device_bmc = true; ++ const u32 bmc = SCU_PCIE_CONF_BMC_EN | ++ SCU_PCIE_CONF_BMC_EN_MSI | SCU_PCIE_CONF_BMC_EN_IRQ | ++ SCU_PCIE_CONF_BMC_EN_DMA; ++ const u32 vga = SCU_PCIE_CONF_VGA_EN | ++ SCU_PCIE_CONF_VGA_EN_MSI | SCU_PCIE_CONF_VGA_EN_IRQ | ++ SCU_PCIE_CONF_VGA_EN_DMA; ++ const char *pcie = NULL; ++ ++ if (!of_property_read_string(dev->of_node, ++ "aspeed,pcie-device", &pcie)) { ++ if (!strcmp(pcie, "vga")) { ++ pcie_device_bmc = false; ++ } else if (strcmp(pcie, "bmc")) { ++ dev_err(dev, ++ "Invalid pcie-device property %s.\n", ++ pcie); ++ return -EINVAL; ++ } ++ } ++ ++ if (pcie_device_bmc) { ++ selection = bmc; ++ regmap_update_bits(scu, ctx->chip->scu_bmc_class, ++ SCU_BMC_CLASS_REV_MASK, ++ SCU_BMC_CLASS_REV_XDMA); ++ } else { ++ selection = vga; ++ } ++ ++ regmap_update_bits(scu, ctx->chip->scu_pcie_conf, bmc | vga, ++ selection); ++ ++ if (ctx->chip->scu_misc_ctrl) { ++ regmap_update_bits(scu, ctx->chip->scu_misc_ctrl, ++ ctx->chip->scu_misc_mask, ++ ctx->chip->scu_misc_mask); ++ ++ regmap_update_bits(scu, SCU_AST2600_DEBUG_CTRL, ++ ctx->chip->scu_disable_mask, 0); ++ } ++ ++ if (ctx->chip->scu_pcie_ctrl) { ++ regmap_update_bits(scu, ctx->chip->scu_pcie_ctrl, ++ SCU_AST2700_PCIE_CTRL_DMA_EN, ++ SCU_AST2700_PCIE_CTRL_DMA_EN); ++ } ++ } else { ++ dev_warn(dev, "Unable to configure PCIe: %ld; continuing.\n", ++ PTR_ERR(scu)); ++ } ++ ++ return 0; ++} ++ ++static void aspeed_xdma_kobject_release(struct kobject *kobj) ++{ ++ struct aspeed_xdma *ctx = container_of(kobj, struct aspeed_xdma, kobj); ++ ++ if (ctx->pcie_irq >= 0) ++ free_irq(ctx->pcie_irq, ctx); ++ ++ gen_pool_free(ctx->pool, (unsigned long)ctx->cmdq, XDMA_CMDQ_SIZE); ++ ++ 
gen_pool_destroy(ctx->pool); ++ ++ dma_free_coherent(ctx->dev, ctx->mem_size, ctx->mem_virt, ++ ctx->mem_coherent); ++ ++ if (ctx->reset_rc) ++ reset_control_put(ctx->reset_rc); ++ reset_control_put(ctx->reset); ++ ++ clk_put(ctx->clock); ++ ++ free_irq(ctx->irq, ctx); ++ ++ iounmap(ctx->base); ++ release_mem_region(ctx->res_start, ctx->res_size); ++ ++ kfree(ctx); ++} ++ ++static const struct kobj_type aspeed_xdma_kobject_type = { ++ .release = aspeed_xdma_kobject_release, ++}; ++ ++static int aspeed_xdma_iomap(struct aspeed_xdma *ctx, ++ struct platform_device *pdev) ++{ ++ resource_size_t size; ++ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ ++ if (!res) ++ return -ENOMEM; ++ ++ size = resource_size(res); ++ if (!request_mem_region(res->start, size, dev_name(ctx->dev))) ++ return -ENOMEM; ++ ++ ctx->base = ioremap(res->start, size); ++ if (!ctx->base) { ++ release_mem_region(res->start, size); ++ return -ENOMEM; ++ } ++ ++ ctx->res_start = res->start; ++ ctx->res_size = size; ++ ++ return 0; ++} ++ ++static int aspeed_xdma_probe(struct platform_device *pdev) ++{ ++ int rc, id; ++ struct aspeed_xdma *ctx; ++ struct reserved_mem *mem; ++ struct device *dev = &pdev->dev; ++ struct device_node *memory_region; ++ const void *md = of_device_get_match_data(dev); ++ bool rc_f; ++ ++ if (!md) ++ return -ENODEV; ++ ++ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); ++ if (!ctx) ++ return -ENOMEM; ++ ++ ctx->chip = md; ++ ctx->dev = dev; ++ platform_set_drvdata(pdev, ctx); ++ spin_lock_init(&ctx->client_lock); ++ spin_lock_init(&ctx->engine_lock); ++ INIT_WORK(&ctx->reset_work, aspeed_xdma_reset_work); ++ init_waitqueue_head(&ctx->wait); ++ ++ rc_f = of_find_property(dev->of_node, "pcie_rc", NULL) ? 1 : 0; ++ ++ rc = aspeed_xdma_iomap(ctx, pdev); ++ if (rc) { ++ dev_err(dev, "Failed to map registers.\n"); ++ goto err_nomap; ++ } ++ ++ ctx->irq = platform_get_irq(pdev, 0); ++ if (ctx->irq < 0) { ++ dev_err(dev, "Failed to find IRQ.\n"); ++ rc = ctx->irq; ++ goto err_noirq; ++ } ++ ++ rc = request_irq(ctx->irq, aspeed_xdma_irq, 0, dev_name(dev), ctx); ++ if (rc < 0) { ++ dev_err(dev, "Failed to request IRQ %d.\n", ctx->irq); ++ goto err_noirq; ++ } ++ ++ ctx->clock = clk_get(dev, NULL); ++ if (IS_ERR(ctx->clock)) { ++ dev_err(dev, "Failed to request clock.\n"); ++ rc = PTR_ERR(ctx->clock); ++ goto err_noclk; ++ } ++ ++ ctx->reset = reset_control_get_exclusive(dev, NULL); ++ if (IS_ERR(ctx->reset)) { ++ dev_err(dev, "Failed to request reset control.\n"); ++ rc = PTR_ERR(ctx->reset); ++ goto err_noreset; ++ } ++ ++ if (rc_f) { ++ ctx->reset_rc = reset_control_get_exclusive(dev, "root-complex"); ++ if (IS_ERR(ctx->reset_rc)) { ++ dev_dbg(dev, "Failed to request reset RC control.\n"); ++ ctx->reset_rc = NULL; ++ } ++ } ++ ++ memory_region = of_parse_phandle(dev->of_node, "memory-region", 0); ++ if (!memory_region) { ++ dev_err(dev, "Failed to find memory-region.\n"); ++ rc = -ENOMEM; ++ goto err_nomem; ++ } ++ ++ mem = of_reserved_mem_lookup(memory_region); ++ of_node_put(memory_region); ++ if (!mem) { ++ dev_err(dev, "Failed to find reserved memory.\n"); ++ rc = -ENOMEM; ++ goto err_nomem; ++ } ++ ++ ctx->mem_phys = mem->base; ++ ctx->mem_size = mem->size; ++ ++ rc = of_reserved_mem_device_init(dev); ++ if (rc) { ++ dev_err(dev, "Failed to init reserved memory.\n"); ++ goto err_nomem; ++ } ++ ++ rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); ++ if (rc) { ++ dev_err(dev, "Failed to mask DMA.\n"); ++ goto err_nomem; ++ } ++ ++ ctx->mem_virt = dma_alloc_coherent(dev, 
ctx->mem_size, ++ &ctx->mem_coherent, __GFP_NOWARN); ++ if (!ctx->mem_virt) { ++ dev_err(dev, "Failed to allocate reserved memory.\n"); ++ rc = -ENOMEM; ++ goto err_nomem; ++ } ++ ++ ctx->pool = gen_pool_create(ilog2(PAGE_SIZE), -1); ++ if (!ctx->pool) { ++ dev_err(dev, "Failed to setup genalloc pool.\n"); ++ rc = -ENOMEM; ++ goto err_nopool; ++ } ++ ++ rc = gen_pool_add_virt(ctx->pool, (unsigned long)ctx->mem_virt, ++ ctx->mem_phys, ctx->mem_size, -1); ++ if (rc) { ++ dev_err(ctx->dev, "Failed to add memory to genalloc pool.\n"); ++ goto err_pool_scu_clk; ++ } ++ ++ rc = aspeed_xdma_init_scu(ctx, dev); ++ if (rc) ++ goto err_pool_scu_clk; ++ ++ rc = clk_prepare_enable(ctx->clock); ++ if (rc) { ++ dev_err(dev, "Failed to enable the clock.\n"); ++ goto err_pool_scu_clk; ++ } ++ ++ if (ctx->reset_rc) { ++ rc = reset_control_deassert(ctx->reset_rc); ++ if (rc) { ++ dev_err(dev, "Failed to clear the RC reset.\n"); ++ goto err_reset_rc; ++ } ++ usleep_range(XDMA_ENGINE_SETUP_TIME_MIN_US, ++ XDMA_ENGINE_SETUP_TIME_MAX_US); ++ } ++ ++ rc = reset_control_deassert(ctx->reset); ++ if (rc) { ++ dev_err(dev, "Failed to clear the reset.\n"); ++ goto err_reset; ++ } ++ usleep_range(XDMA_ENGINE_SETUP_TIME_MIN_US, ++ XDMA_ENGINE_SETUP_TIME_MAX_US); ++ ++ ctx->cmdq = gen_pool_dma_alloc(ctx->pool, XDMA_CMDQ_SIZE, ++ &ctx->cmdq_phys); ++ if (!ctx->cmdq) { ++ dev_err(ctx->dev, "Failed to genalloc cmdq.\n"); ++ rc = -ENOMEM; ++ goto err_pool; ++ } ++ ++ aspeed_xdma_init_eng(ctx); ++ ++ id = of_alias_get_id(dev->of_node, "xdma"); ++ if (id < 0) ++ id = 0; ++ ++ ctx->misc.minor = MISC_DYNAMIC_MINOR; ++ ctx->misc.fops = &aspeed_xdma_fops; ++ ctx->misc.name = kasprintf(GFP_KERNEL, "%s%d", DEVICE_NAME, id); ++ ctx->misc.parent = dev; ++ rc = misc_register(&ctx->misc); ++ if (rc) { ++ dev_err(dev, "Failed to register xdma miscdevice.\n"); ++ goto err_misc; ++ } ++ ++ /* ++ * This interrupt could fire immediately so only request it once the ++ * engine and driver are initialized. 
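++ * It fires on PCIe reset (PERST); the handler flags the engine as
++ * in-reset and defers re-initialization to reset_work.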
++ */ ++ ctx->pcie_irq = platform_get_irq(pdev, 1); ++ if (ctx->pcie_irq < 0) { ++ dev_warn(dev, "Failed to find PCI-E IRQ.\n"); ++ } else { ++ rc = request_irq(ctx->pcie_irq, aspeed_xdma_pcie_irq, ++ IRQF_SHARED, dev_name(dev), ctx); ++ if (rc < 0) { ++ dev_warn(dev, "Failed to request PCI-E IRQ %d.\n", rc); ++ ctx->pcie_irq = -1; ++ } ++ } ++ ++ kobject_init(&ctx->kobj, &aspeed_xdma_kobject_type); ++ return 0; ++ ++err_misc: ++ gen_pool_free(ctx->pool, (unsigned long)ctx->cmdq, XDMA_CMDQ_SIZE); ++err_pool: ++ reset_control_assert(ctx->reset); ++err_reset: ++ if (ctx->reset_rc) ++ reset_control_assert(ctx->reset_rc); ++err_reset_rc: ++ clk_disable_unprepare(ctx->clock); ++err_pool_scu_clk: ++ gen_pool_destroy(ctx->pool); ++err_nopool: ++ dma_free_coherent(ctx->dev, ctx->mem_size, ctx->mem_virt, ++ ctx->mem_coherent); ++err_nomem: ++ if (ctx->reset_rc) ++ reset_control_put(ctx->reset_rc); ++ reset_control_put(ctx->reset); ++err_noreset: ++ clk_put(ctx->clock); ++err_noclk: ++ free_irq(ctx->irq, ctx); ++err_noirq: ++ iounmap(ctx->base); ++ release_mem_region(ctx->res_start, ctx->res_size); ++err_nomap: ++ kfree(ctx); ++ return rc; ++} ++ ++static void aspeed_xdma_remove(struct platform_device *pdev) ++{ ++ struct aspeed_xdma *ctx = platform_get_drvdata(pdev); ++ ++ reset_control_assert(ctx->reset); ++ if (ctx->reset_rc) ++ reset_control_assert(ctx->reset_rc); ++ clk_disable_unprepare(ctx->clock); ++ ++ aspeed_xdma_done(ctx, true); ++ ++ misc_deregister(&ctx->misc); ++ kobject_put(&ctx->kobj); ++} ++ ++static const struct aspeed_xdma_chip aspeed_ast2500_xdma_chip = { ++ .control = XDMA_AST2500_CTRL_US_COMP | XDMA_AST2500_CTRL_DS_COMP | ++ XDMA_AST2500_CTRL_DS_DIRTY | XDMA_AST2500_CTRL_DS_SIZE_256 | ++ XDMA_AST2500_CTRL_DS_TIMEOUT | XDMA_AST2500_CTRL_DS_CHECK_ID, ++ .scu_bmc_class = SCU_AST2500_BMC_CLASS_REV, ++ .scu_misc_ctrl = 0, ++ .scu_pcie_conf = SCU_AST2500_PCIE_CONF, ++ .scu_pcie_ctrl = 0, ++ .queue_entry_size = XDMA_AST2500_QUEUE_ENTRY_SIZE, ++ .regs = { ++ .bmc_cmdq_addr = XDMA_AST2500_BMC_CMDQ_ADDR, ++ .bmc_cmdq_addr_ext = 0, ++ .bmc_cmdq_endp = XDMA_AST2500_BMC_CMDQ_ENDP, ++ .bmc_cmdq_writep = XDMA_AST2500_BMC_CMDQ_WRITEP, ++ .bmc_cmdq_readp = XDMA_AST2500_BMC_CMDQ_READP, ++ .control = XDMA_AST2500_CTRL, ++ .status = XDMA_AST2500_STATUS, ++ }, ++ .status_bits = { ++ .us_comp = XDMA_AST2500_STATUS_US_COMP, ++ .ds_comp = XDMA_AST2500_STATUS_DS_COMP, ++ .ds_dirty = XDMA_AST2500_STATUS_DS_DIRTY, ++ }, ++ .set_cmd = aspeed_xdma_ast2500_set_cmd, ++}; ++ ++static const struct aspeed_xdma_chip aspeed_ast2600_xdma_chip = { ++ .control = XDMA_AST2600_CTRL_US_COMP | XDMA_AST2600_CTRL_DS_COMP | ++ XDMA_AST2600_CTRL_DS_DIRTY | XDMA_AST2600_CTRL_DS_SIZE_256, ++ .scu_bmc_class = SCU_AST2600_BMC_CLASS_REV, ++ .scu_misc_ctrl = SCU_AST2600_MISC_CTRL, ++ .scu_misc_mask = SCU_AST2600_MISC_CTRL_XDMA_BMC, ++ .scu_disable_mask = DEBUG_CTRL_AST2600_XDMA_DISABLE, ++ .scu_pcie_conf = SCU_AST2600_PCIE_CONF, ++ .scu_pcie_ctrl = 0, ++ .queue_entry_size = XDMA_AST2600_QUEUE_ENTRY_SIZE, ++ .regs = { ++ .bmc_cmdq_addr = XDMA_AST2600_BMC_CMDQ_ADDR, ++ .bmc_cmdq_addr_ext = 0, ++ .bmc_cmdq_endp = XDMA_AST2600_BMC_CMDQ_ENDP, ++ .bmc_cmdq_writep = XDMA_AST2600_BMC_CMDQ_WRITEP, ++ .bmc_cmdq_readp = XDMA_AST2600_BMC_CMDQ_READP, ++ .control = XDMA_AST2600_CTRL, ++ .status = XDMA_AST2600_STATUS, ++ }, ++ .status_bits = { ++ .us_comp = XDMA_AST2600_STATUS_US_COMP, ++ .ds_comp = XDMA_AST2600_STATUS_DS_COMP, ++ .ds_dirty = XDMA_AST2600_STATUS_DS_DIRTY, ++ }, ++ .set_cmd = aspeed_xdma_ast2600_set_cmd, ++}; ++ ++static const 
struct aspeed_xdma_chip aspeed_ast2700_xdma0_chip = { ++ .control = XDMA_AST2700_CTRL_US_COMP | XDMA_AST2700_CTRL_DS_COMP | ++ XDMA_AST2700_CTRL_DS_DIRTY, ++ .scu_bmc_class = SCU_AST2700_PCIE0_BMC_CLASS_REV, ++ .scu_misc_ctrl = SCU_AST2600_MISC_CTRL, ++ .scu_misc_mask = SCU_AST2700_MISC_CTRL_XDMA_CLIENT, ++ .scu_disable_mask = DEBUG_CTRL_AST2600_XDMA_DISABLE | DEBUG_CTRL_AST2700_XDMA_DISABLE, ++ .scu_pcie_conf = SCU_AST2700_PCIE0_CONF, ++ .scu_pcie_ctrl = SCU_AST2700_PCIE0_CTRL, ++ .queue_entry_size = XDMA_AST2700_QUEUE_ENTRY_SIZE, ++ .regs = { ++ .bmc_cmdq_addr = XDMA_AST2700_BMC_CMDQ_ADDR0, ++ .bmc_cmdq_addr_ext = XDMA_AST2700_BMC_CMDQ_ADDR1, ++ .bmc_cmdq_endp = XDMA_AST2700_BMC_CMDQ_ENDP, ++ .bmc_cmdq_writep = XDMA_AST2700_BMC_CMDQ_WRITEP, ++ .bmc_cmdq_readp = XDMA_AST2700_BMC_CMDQ_READP, ++ .control = XDMA_AST2700_CTRL, ++ .status = XDMA_AST2700_STATUS, ++ }, ++ .status_bits = { ++ .us_comp = XDMA_AST2700_STATUS_US_COMP, ++ .ds_comp = XDMA_AST2700_STATUS_DS_COMP, ++ .ds_dirty = XDMA_AST2700_STATUS_DS_DIRTY, ++ }, ++ .set_cmd = aspeed_xdma_ast2700_set_cmd, ++}; ++ ++static const struct aspeed_xdma_chip aspeed_ast2700_xdma1_chip = { ++ .control = XDMA_AST2700_CTRL_US_COMP | XDMA_AST2700_CTRL_DS_COMP | ++ XDMA_AST2700_CTRL_DS_DIRTY, ++ .scu_bmc_class = SCU_AST2700_PCIE1_BMC_CLASS_REV, ++ .scu_misc_ctrl = SCU_AST2600_MISC_CTRL, ++ .scu_misc_mask = SCU_AST2700_MISC_CTRL_XDMA_CLIENT, ++ .scu_disable_mask = DEBUG_CTRL_AST2600_XDMA_DISABLE | DEBUG_CTRL_AST2700_XDMA_DISABLE, ++ .scu_pcie_conf = SCU_AST2700_PCIE1_CONF, ++ .scu_pcie_ctrl = SCU_AST2700_PCIE1_CTRL, ++ .queue_entry_size = XDMA_AST2700_QUEUE_ENTRY_SIZE, ++ .regs = { ++ .bmc_cmdq_addr = XDMA_AST2700_BMC_CMDQ_ADDR0, ++ .bmc_cmdq_addr_ext = XDMA_AST2700_BMC_CMDQ_ADDR1, ++ .bmc_cmdq_endp = XDMA_AST2700_BMC_CMDQ_ENDP, ++ .bmc_cmdq_writep = XDMA_AST2700_BMC_CMDQ_WRITEP, ++ .bmc_cmdq_readp = XDMA_AST2700_BMC_CMDQ_READP, ++ .control = XDMA_AST2700_CTRL, ++ .status = XDMA_AST2700_STATUS, ++ }, ++ .status_bits = { ++ .us_comp = XDMA_AST2700_STATUS_US_COMP, ++ .ds_comp = XDMA_AST2700_STATUS_DS_COMP, ++ .ds_dirty = XDMA_AST2700_STATUS_DS_DIRTY, ++ }, ++ .set_cmd = aspeed_xdma_ast2700_set_cmd, ++}; ++ ++static const struct of_device_id aspeed_xdma_match[] = { ++ { ++ .compatible = "aspeed,ast2500-xdma", ++ .data = &aspeed_ast2500_xdma_chip, ++ }, ++ { ++ .compatible = "aspeed,ast2600-xdma", ++ .data = &aspeed_ast2600_xdma_chip, ++ }, ++ { ++ .compatible = "aspeed,ast2700-xdma0", ++ .data = &aspeed_ast2700_xdma0_chip, ++ }, ++ { ++ .compatible = "aspeed,ast2700-xdma1", ++ .data = &aspeed_ast2700_xdma1_chip, ++ }, ++ { }, ++}; ++ ++static struct platform_driver aspeed_xdma_driver = { ++ .probe = aspeed_xdma_probe, ++ .remove = aspeed_xdma_remove, ++ .driver = { ++ .name = DEVICE_NAME, ++ .of_match_table = aspeed_xdma_match, ++ }, ++}; ++ ++module_platform_driver(aspeed_xdma_driver); ++ ++MODULE_AUTHOR("Eddie James"); ++MODULE_DESCRIPTION("ASPEED XDMA Engine Driver"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/soc/aspeed/ast2500-espi.c b/drivers/soc/aspeed/ast2500-espi.c +--- a/drivers/soc/aspeed/ast2500-espi.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/soc/aspeed/ast2500-espi.c 2025-12-23 10:16:21.125032653 +0000 +@@ -0,0 +1,1739 @@ ++// SPDX-License-Identifier: GPL-2.0+ ++/* ++ * Copyright 2023 Aspeed Technology Inc. 
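++ *
++ * AST2500 eSPI controller driver exposing the peripheral, virtual wire,
++ * out-of-band and flash channels to user space through misc devices.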
++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "ast2500-espi.h" ++ ++#define DEVICE_NAME "aspeed-espi" ++ ++#define PERIF_MCYC_UNLOCK 0xfedc756e ++#define PERIF_MCYC_ALIGN SZ_64K ++ ++#define FLASH_SAFS_ALIGN SZ_16M ++ ++struct ast2500_espi_perif { ++ struct { ++ bool enable; ++ void *virt; ++ dma_addr_t taddr; ++ uint32_t saddr; ++ uint32_t size; ++ } mcyc; ++ ++ struct { ++ bool enable; ++ void *np_tx_virt; ++ dma_addr_t np_tx_addr; ++ void *pc_tx_virt; ++ dma_addr_t pc_tx_addr; ++ void *pc_rx_virt; ++ dma_addr_t pc_rx_addr; ++ } dma; ++ ++ bool rx_ready; ++ wait_queue_head_t wq; ++ ++ spinlock_t lock; ++ struct mutex np_tx_mtx; ++ struct mutex pc_tx_mtx; ++ struct mutex pc_rx_mtx; ++ ++ struct miscdevice mdev; ++}; ++ ++struct ast2500_espi_vw { ++ struct { ++ bool hw_mode; ++ uint32_t val; ++ } gpio; ++ ++ struct miscdevice mdev; ++}; ++ ++struct ast2500_espi_oob { ++ struct { ++ bool enable; ++ void *tx_virt; ++ dma_addr_t tx_addr; ++ void *rx_virt; ++ dma_addr_t rx_addr; ++ } dma; ++ ++ bool rx_ready; ++ wait_queue_head_t wq; ++ ++ spinlock_t lock; ++ struct mutex tx_mtx; ++ struct mutex rx_mtx; ++ ++ struct miscdevice mdev; ++}; ++ ++struct ast2500_espi_flash { ++ struct { ++ uint32_t mode; ++ phys_addr_t taddr; ++ uint32_t size; ++ } safs; ++ ++ struct { ++ bool enable; ++ void *tx_virt; ++ dma_addr_t tx_addr; ++ void *rx_virt; ++ dma_addr_t rx_addr; ++ } dma; ++ ++ bool rx_ready; ++ wait_queue_head_t wq; ++ ++ spinlock_t lock; ++ struct mutex rx_mtx; ++ struct mutex tx_mtx; ++ ++ struct miscdevice mdev; ++}; ++ ++struct ast2500_espi { ++ struct device *dev; ++ void __iomem *regs; ++ struct clk *clk; ++ int irq; ++ ++ struct ast2500_espi_perif perif; ++ struct ast2500_espi_vw vw; ++ struct ast2500_espi_oob oob; ++ struct ast2500_espi_flash flash; ++}; ++ ++/* peripheral channel (CH0) */ ++static long ast2500_espi_perif_pc_get_rx(struct file *fp, ++ struct ast2500_espi_perif *perif, ++ struct aspeed_espi_ioc *ioc) ++{ ++ uint32_t reg, cyc, tag, len; ++ struct ast2500_espi *espi; ++ struct espi_comm_hdr *hdr; ++ unsigned long flags; ++ uint32_t pkt_len; ++ uint8_t *pkt; ++ int i, rc; ++ ++ espi = container_of(perif, struct ast2500_espi, perif); ++ ++ if (fp->f_flags & O_NONBLOCK) { ++ if (!mutex_trylock(&perif->pc_rx_mtx)) ++ return -EAGAIN; ++ ++ if (!perif->rx_ready) { ++ rc = -ENODATA; ++ goto unlock_mtx_n_out; ++ } ++ } else { ++ mutex_lock(&perif->pc_rx_mtx); ++ ++ if (!perif->rx_ready) { ++ rc = wait_event_interruptible(perif->wq, perif->rx_ready); ++ if (rc == -ERESTARTSYS) { ++ rc = -EINTR; ++ goto unlock_mtx_n_out; ++ } ++ } ++ } ++ ++ /* ++ * common header (i.e. cycle type, tag, and length) ++ * part is written to HW registers ++ */ ++ reg = readl(espi->regs + ESPI_PERIF_PC_RX_CTRL); ++ cyc = FIELD_GET(ESPI_PERIF_PC_RX_CTRL_CYC, reg); ++ tag = FIELD_GET(ESPI_PERIF_PC_RX_CTRL_TAG, reg); ++ len = FIELD_GET(ESPI_PERIF_PC_RX_CTRL_LEN, reg); ++ ++ /* ++ * calculate the length of the rest part of the ++ * eSPI packet to be read from HW and copied to ++ * user space. ++ */ ++ switch (cyc) { ++ case ESPI_PERIF_MSG: ++ pkt_len = sizeof(struct espi_perif_msg); ++ break; ++ case ESPI_PERIF_MSG_D: ++ pkt_len = ((len) ? 
len : ESPI_MAX_PLD_LEN) + ++ sizeof(struct espi_perif_msg); ++ break; ++ case ESPI_PERIF_SUC_CMPLT_D_MIDDLE: ++ case ESPI_PERIF_SUC_CMPLT_D_FIRST: ++ case ESPI_PERIF_SUC_CMPLT_D_LAST: ++ case ESPI_PERIF_SUC_CMPLT_D_ONLY: ++ pkt_len = ((len) ? len : ESPI_MAX_PLD_LEN) + ++ sizeof(struct espi_perif_cmplt); ++ break; ++ case ESPI_PERIF_SUC_CMPLT: ++ case ESPI_PERIF_UNSUC_CMPLT: ++ pkt_len = sizeof(struct espi_perif_cmplt); ++ break; ++ default: ++ rc = -EFAULT; ++ goto unlock_mtx_n_out; ++ } ++ ++ if (ioc->pkt_len < pkt_len) { ++ rc = -EINVAL; ++ goto unlock_mtx_n_out; ++ } ++ ++ pkt = vmalloc(pkt_len); ++ if (!pkt) { ++ rc = -ENOMEM; ++ goto unlock_mtx_n_out; ++ } ++ ++ hdr = (struct espi_comm_hdr *)pkt; ++ hdr->cyc = cyc; ++ hdr->tag = tag; ++ hdr->len_h = len >> 8; ++ hdr->len_l = len & 0xff; ++ ++ if (perif->dma.enable) { ++ memcpy(hdr + 1, perif->dma.pc_rx_virt, pkt_len - sizeof(*hdr)); ++ } else { ++ for (i = sizeof(*hdr); i < pkt_len; ++i) ++ reg = readl(espi->regs + ESPI_PERIF_PC_RX_DATA) & 0xff; ++ } ++ ++ if (copy_to_user((void __user *)ioc->pkt, pkt, pkt_len)) { ++ rc = -EFAULT; ++ goto free_n_out; ++ } ++ ++ spin_lock_irqsave(&perif->lock, flags); ++ ++ writel(ESPI_PERIF_PC_RX_CTRL_SERV_PEND, espi->regs + ESPI_PERIF_PC_RX_CTRL); ++ perif->rx_ready = 0; ++ ++ spin_unlock_irqrestore(&perif->lock, flags); ++ ++ rc = 0; ++ ++free_n_out: ++ vfree(pkt); ++ ++unlock_mtx_n_out: ++ mutex_unlock(&perif->pc_rx_mtx); ++ ++ return rc; ++} ++ ++static long ast2500_espi_perif_pc_put_tx(struct file *fp, ++ struct ast2500_espi_perif *perif, ++ struct aspeed_espi_ioc *ioc) ++{ ++ uint32_t reg, cyc, tag, len; ++ struct ast2500_espi *espi; ++ struct espi_comm_hdr *hdr; ++ uint8_t *pkt; ++ int i, rc; ++ ++ espi = container_of(perif, struct ast2500_espi, perif); ++ ++ if (!mutex_trylock(&perif->pc_tx_mtx)) ++ return -EAGAIN; ++ ++ reg = readl(espi->regs + ESPI_PERIF_PC_TX_CTRL); ++ if (reg & ESPI_PERIF_PC_TX_CTRL_TRIG_PEND) { ++ rc = -EBUSY; ++ goto unlock_n_out; ++ } ++ ++ pkt = vmalloc(ioc->pkt_len); ++ if (!pkt) { ++ rc = -ENOMEM; ++ goto unlock_n_out; ++ } ++ ++ hdr = (struct espi_comm_hdr *)pkt; ++ ++ if (copy_from_user(pkt, (void __user *)ioc->pkt, ioc->pkt_len)) { ++ rc = -EFAULT; ++ goto free_n_out; ++ } ++ ++ /* ++ * common header (i.e. 
cycle type, tag, and length) ++ * part is written to HW registers ++ */ ++ if (perif->dma.enable) { ++ memcpy(perif->dma.pc_tx_virt, hdr + 1, ioc->pkt_len - sizeof(*hdr)); ++ dma_wmb(); ++ } else { ++ for (i = sizeof(*hdr); i < ioc->pkt_len; ++i) ++ writel(pkt[i], espi->regs + ESPI_PERIF_PC_TX_DATA); ++ } ++ ++ cyc = hdr->cyc; ++ tag = hdr->tag; ++ len = (hdr->len_h << 8) | (hdr->len_l & 0xff); ++ ++ reg = FIELD_PREP(ESPI_PERIF_PC_TX_CTRL_CYC, cyc) ++ | FIELD_PREP(ESPI_PERIF_PC_TX_CTRL_TAG, tag) ++ | FIELD_PREP(ESPI_PERIF_PC_TX_CTRL_LEN, len) ++ | ESPI_PERIF_PC_TX_CTRL_TRIG_PEND; ++ writel(reg, espi->regs + ESPI_PERIF_PC_TX_CTRL); ++ ++ rc = 0; ++ ++free_n_out: ++ vfree(pkt); ++ ++unlock_n_out: ++ mutex_unlock(&perif->pc_tx_mtx); ++ ++ return rc; ++} ++ ++static long ast2500_espi_perif_np_put_tx(struct file *fp, ++ struct ast2500_espi_perif *perif, ++ struct aspeed_espi_ioc *ioc) ++{ ++ uint32_t reg, cyc, tag, len; ++ struct ast2500_espi *espi; ++ struct espi_comm_hdr *hdr; ++ uint8_t *pkt; ++ int i, rc; ++ ++ espi = container_of(perif, struct ast2500_espi, perif); ++ ++ if (!mutex_trylock(&perif->np_tx_mtx)) ++ return -EAGAIN; ++ ++ reg = readl(espi->regs + ESPI_PERIF_NP_TX_CTRL); ++ if (reg & ESPI_PERIF_NP_TX_CTRL_TRIG_PEND) { ++ rc = -EBUSY; ++ goto unlock_n_out; ++ } ++ ++ pkt = vmalloc(ioc->pkt_len); ++ if (!pkt) { ++ rc = -ENOMEM; ++ goto unlock_n_out; ++ } ++ ++ hdr = (struct espi_comm_hdr *)pkt; ++ ++ if (copy_from_user(pkt, (void __user *)ioc->pkt, ioc->pkt_len)) { ++ rc = -EFAULT; ++ goto free_n_out; ++ } ++ ++ /* ++ * common header (i.e. cycle type, tag, and length) ++ * part is written to HW registers ++ */ ++ if (perif->dma.enable) { ++ memcpy(perif->dma.np_tx_virt, hdr + 1, ioc->pkt_len - sizeof(*hdr)); ++ dma_wmb(); ++ } else { ++ for (i = sizeof(*hdr); i < ioc->pkt_len; ++i) ++ writel(pkt[i], espi->regs + ESPI_PERIF_NP_TX_DATA); ++ } ++ ++ cyc = hdr->cyc; ++ tag = hdr->tag; ++ len = (hdr->len_h << 8) | (hdr->len_l & 0xff); ++ ++ reg = FIELD_PREP(ESPI_PERIF_NP_TX_CTRL_CYC, cyc) ++ | FIELD_PREP(ESPI_PERIF_NP_TX_CTRL_TAG, tag) ++ | FIELD_PREP(ESPI_PERIF_NP_TX_CTRL_LEN, len) ++ | ESPI_PERIF_NP_TX_CTRL_TRIG_PEND; ++ writel(reg, espi->regs + ESPI_PERIF_NP_TX_CTRL); ++ ++ rc = 0; ++ ++free_n_out: ++ vfree(pkt); ++ ++unlock_n_out: ++ mutex_unlock(&perif->np_tx_mtx); ++ ++ return rc; ++} ++ ++static long ast2500_espi_perif_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) ++{ ++ struct ast2500_espi_perif *perif; ++ struct aspeed_espi_ioc ioc; ++ ++ perif = container_of(fp->private_data, struct ast2500_espi_perif, mdev); ++ ++ if (copy_from_user(&ioc, (void __user *)arg, sizeof(ioc))) ++ return -EFAULT; ++ ++ if (ioc.pkt_len > ESPI_MAX_PKT_LEN) ++ return -EINVAL; ++ ++ switch (cmd) { ++ case ASPEED_ESPI_PERIF_PC_GET_RX: ++ return ast2500_espi_perif_pc_get_rx(fp, perif, &ioc); ++ case ASPEED_ESPI_PERIF_PC_PUT_TX: ++ return ast2500_espi_perif_pc_put_tx(fp, perif, &ioc); ++ case ASPEED_ESPI_PERIF_NP_PUT_TX: ++ return ast2500_espi_perif_np_put_tx(fp, perif, &ioc); ++ default: ++ break; ++ }; ++ ++ return -EINVAL; ++} ++ ++static int ast2500_espi_perif_mmap(struct file *fp, struct vm_area_struct *vma) ++{ ++ struct ast2500_espi_perif *perif; ++ unsigned long vm_size; ++ pgprot_t vm_prot; ++ ++ perif = container_of(fp->private_data, struct ast2500_espi_perif, mdev); ++ if (!perif->mcyc.enable) ++ return -EPERM; ++ ++ vm_size = vma->vm_end - vma->vm_start; ++ vm_prot = vma->vm_page_prot; ++ ++ if (((vma->vm_pgoff << PAGE_SHIFT) + vm_size) > perif->mcyc.size) ++ return -EINVAL; ++ 
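++	/*
++	 * Editorial note (descriptive comment, not in the original patch): the
++	 * memory-cycle buffer was allocated with dmam_alloc_coherent() in the
++	 * probe path and serves as the target window for host peripheral-channel
++	 * memory cycles (see the ESPI_PERIF_MCYC_SADDR/TADDR programming in the
++	 * reset path), so the userspace mapping is made uncached below to stay
++	 * coherent with device writes without explicit cache maintenance.
++	 */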
++ vm_prot = pgprot_noncached(vm_prot); ++ ++ if (remap_pfn_range(vma, vma->vm_start, ++ (perif->mcyc.taddr >> PAGE_SHIFT) + vma->vm_pgoff, ++ vm_size, vm_prot)) ++ return -EAGAIN; ++ ++ return 0; ++} ++ ++static const struct file_operations ast2500_espi_perif_fops = { ++ .owner = THIS_MODULE, ++ .mmap = ast2500_espi_perif_mmap, ++ .unlocked_ioctl = ast2500_espi_perif_ioctl, ++}; ++ ++static void ast2500_espi_perif_isr(struct ast2500_espi *espi) ++{ ++ struct ast2500_espi_perif *perif; ++ unsigned long flags; ++ uint32_t sts; ++ ++ perif = &espi->perif; ++ ++ sts = readl(espi->regs + ESPI_INT_STS); ++ ++ if (sts & ESPI_INT_STS_PERIF_PC_RX_CMPLT) { ++ writel(ESPI_INT_STS_PERIF_PC_RX_CMPLT, espi->regs + ESPI_INT_STS); ++ ++ spin_lock_irqsave(&perif->lock, flags); ++ perif->rx_ready = true; ++ spin_unlock_irqrestore(&perif->lock, flags); ++ ++ wake_up_interruptible(&perif->wq); ++ } ++} ++ ++static void ast2500_espi_perif_reset(struct ast2500_espi *espi) ++{ ++ struct ast2500_espi_perif *perif; ++ struct device *dev; ++ uint32_t reg, mask; ++ ++ dev = espi->dev; ++ ++ perif = &espi->perif; ++ ++ reg = readl(espi->regs + ESPI_INT_EN); ++ reg &= ~(ESPI_INT_EN_PERIF); ++ writel(reg, espi->regs + ESPI_INT_EN); ++ writel(ESPI_INT_STS_PERIF, espi->regs + ESPI_INT_STS); ++ ++ reg = readl(espi->regs + ESPI_CTRL); ++ reg &= ~(ESPI_CTRL_PERIF_NP_TX_SW_RST ++ | ESPI_CTRL_PERIF_NP_RX_SW_RST ++ | ESPI_CTRL_PERIF_PC_TX_SW_RST ++ | ESPI_CTRL_PERIF_PC_RX_SW_RST ++ | ESPI_CTRL_PERIF_NP_TX_DMA_EN ++ | ESPI_CTRL_PERIF_PC_TX_DMA_EN ++ | ESPI_CTRL_PERIF_PC_RX_DMA_EN ++ | ESPI_CTRL_PERIF_SW_RDY); ++ writel(reg, espi->regs + ESPI_CTRL); ++ ++ udelay(1); ++ ++ reg |= (ESPI_CTRL_PERIF_NP_TX_SW_RST ++ | ESPI_CTRL_PERIF_NP_RX_SW_RST ++ | ESPI_CTRL_PERIF_PC_TX_SW_RST ++ | ESPI_CTRL_PERIF_PC_RX_SW_RST); ++ writel(reg, espi->regs + ESPI_CTRL); ++ ++ if (perif->mcyc.enable) { ++ mask = ~(perif->mcyc.size - 1); ++ writel(PERIF_MCYC_UNLOCK, espi->regs + ESPI_PERIF_MCYC_MASK); ++ writel(mask, espi->regs + ESPI_PERIF_MCYC_MASK); ++ ++ writel(perif->mcyc.saddr, espi->regs + ESPI_PERIF_MCYC_SADDR); ++ writel(perif->mcyc.taddr, espi->regs + ESPI_PERIF_MCYC_TADDR); ++ } ++ ++ if (perif->dma.enable) { ++ writel(perif->dma.np_tx_addr, espi->regs + ESPI_PERIF_NP_TX_DMA); ++ writel(perif->dma.pc_tx_addr, espi->regs + ESPI_PERIF_PC_TX_DMA); ++ writel(perif->dma.pc_rx_addr, espi->regs + ESPI_PERIF_PC_RX_DMA); ++ ++ reg = readl(espi->regs + ESPI_CTRL) ++ | ESPI_CTRL_PERIF_NP_TX_DMA_EN ++ | ESPI_CTRL_PERIF_PC_TX_DMA_EN ++ | ESPI_CTRL_PERIF_PC_RX_DMA_EN; ++ writel(reg, espi->regs + ESPI_CTRL); ++ } ++ ++ reg = readl(espi->regs + ESPI_INT_EN) | ESPI_INT_EN_PERIF_PC_RX_CMPLT; ++ writel(reg, espi->regs + ESPI_INT_EN); ++ ++ reg = readl(espi->regs + ESPI_CTRL) | ESPI_CTRL_PERIF_SW_RDY; ++ writel(reg, espi->regs + ESPI_CTRL); ++} ++ ++static int ast2500_espi_perif_probe(struct ast2500_espi *espi) ++{ ++ struct ast2500_espi_perif *perif; ++ struct device *dev; ++ int rc; ++ ++ dev = espi->dev; ++ ++ perif = &espi->perif; ++ ++ init_waitqueue_head(&perif->wq); ++ ++ spin_lock_init(&perif->lock); ++ ++ mutex_init(&perif->np_tx_mtx); ++ mutex_init(&perif->pc_tx_mtx); ++ mutex_init(&perif->pc_rx_mtx); ++ ++ perif->mcyc.enable = of_property_read_bool(dev->of_node, "perif-mcyc-enable"); ++ if (perif->mcyc.enable) { ++ rc = of_property_read_u32(dev->of_node, "perif-mcyc-src-addr", &perif->mcyc.saddr); ++ if (rc || !IS_ALIGNED(perif->mcyc.saddr, PERIF_MCYC_ALIGN)) { ++ dev_err(dev, "cannot get 64KB-aligned memory cycle host address\n"); ++ return 
-ENODEV; ++ } ++ ++ rc = of_property_read_u32(dev->of_node, "perif-mcyc-size", &perif->mcyc.size); ++ if (rc || !IS_ALIGNED(perif->mcyc.size, PERIF_MCYC_ALIGN)) { ++ dev_err(dev, "cannot get 64KB-aligned memory cycle size\n"); ++ return -EINVAL; ++ } ++ ++ perif->mcyc.virt = dmam_alloc_coherent(dev, perif->mcyc.size, ++ &perif->mcyc.taddr, GFP_KERNEL); ++ if (!perif->mcyc.virt) { ++ dev_err(dev, "cannot allocate memory cycle\n"); ++ return -ENOMEM; ++ } ++ } ++ ++ perif->dma.enable = of_property_read_bool(dev->of_node, "perif-dma-mode"); ++ if (perif->dma.enable) { ++ perif->dma.pc_tx_virt = dmam_alloc_coherent(dev, PAGE_SIZE, ++ &perif->dma.pc_tx_addr, GFP_KERNEL); ++ if (!perif->dma.pc_tx_virt) { ++ dev_err(dev, "cannot allocate posted TX DMA buffer\n"); ++ return -ENOMEM; ++ } ++ ++ perif->dma.pc_rx_virt = dmam_alloc_coherent(dev, PAGE_SIZE, ++ &perif->dma.pc_rx_addr, GFP_KERNEL); ++ if (!perif->dma.pc_rx_virt) { ++ dev_err(dev, "cannot allocate posted RX DMA buffer\n"); ++ return -ENOMEM; ++ } ++ ++ perif->dma.np_tx_virt = dmam_alloc_coherent(dev, PAGE_SIZE, ++ &perif->dma.np_tx_addr, GFP_KERNEL); ++ if (!perif->dma.np_tx_virt) { ++ dev_err(dev, "cannot allocate non-posted TX DMA buffer\n"); ++ return -ENOMEM; ++ } ++ } ++ ++ perif->mdev.parent = dev; ++ perif->mdev.minor = MISC_DYNAMIC_MINOR; ++ perif->mdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s-peripheral", DEVICE_NAME); ++ perif->mdev.fops = &ast2500_espi_perif_fops; ++ rc = misc_register(&perif->mdev); ++ if (rc) { ++ dev_err(dev, "cannot register device %s\n", perif->mdev.name); ++ return rc; ++ } ++ ++ ast2500_espi_perif_reset(espi); ++ ++ return 0; ++} ++ ++static int ast2500_espi_perif_remove(struct ast2500_espi *espi) ++{ ++ struct ast2500_espi_perif *perif; ++ struct device *dev; ++ uint32_t reg; ++ ++ dev = espi->dev; ++ ++ perif = &espi->perif; ++ ++ reg = readl(espi->regs + ESPI_INT_EN); ++ reg &= ~(ESPI_INT_EN_PERIF); ++ writel(reg, espi->regs + ESPI_INT_EN); ++ ++ reg = readl(espi->regs + ESPI_CTRL); ++ reg &= ~(ESPI_CTRL_PERIF_NP_TX_DMA_EN ++ | ESPI_CTRL_PERIF_PC_TX_DMA_EN ++ | ESPI_CTRL_PERIF_PC_RX_DMA_EN ++ | ESPI_CTRL_PERIF_SW_RDY); ++ writel(reg, espi->regs + ESPI_CTRL); ++ ++ if (perif->mcyc.enable) ++ dmam_free_coherent(dev, perif->mcyc.size, perif->mcyc.virt, ++ perif->mcyc.taddr); ++ ++ if (perif->dma.enable) { ++ dmam_free_coherent(dev, PAGE_SIZE, perif->dma.np_tx_virt, ++ perif->dma.np_tx_addr); ++ dmam_free_coherent(dev, PAGE_SIZE, perif->dma.pc_tx_virt, ++ perif->dma.pc_tx_addr); ++ dmam_free_coherent(dev, PAGE_SIZE, perif->dma.pc_rx_virt, ++ perif->dma.pc_rx_addr); ++ } ++ ++ mutex_destroy(&perif->np_tx_mtx); ++ mutex_destroy(&perif->pc_tx_mtx); ++ mutex_destroy(&perif->pc_rx_mtx); ++ ++ misc_deregister(&perif->mdev); ++ ++ return 0; ++} ++ ++/* virtual wire channel (CH1) */ ++static long ast2500_espi_vw_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) ++{ ++ struct ast2500_espi_vw *vw; ++ struct ast2500_espi *espi; ++ uint32_t gpio; ++ ++ vw = container_of(fp->private_data, struct ast2500_espi_vw, mdev); ++ espi = container_of(vw, struct ast2500_espi, vw); ++ gpio = vw->gpio.val; ++ ++ switch (cmd) { ++ case ASPEED_ESPI_VW_GET_GPIO_VAL: ++ if (put_user(gpio, (uint32_t __user *)arg)) ++ return -EFAULT; ++ break; ++ case ASPEED_ESPI_VW_PUT_GPIO_VAL: ++ if (get_user(gpio, (uint32_t __user *)arg)) ++ return -EFAULT; ++ ++ writel(gpio, espi->regs + ESPI_VW_GPIO_VAL); ++ break; ++ default: ++ return -EINVAL; ++ }; ++ ++ return 0; ++} ++ ++static const struct file_operations ast2500_espi_vw_fops = 
{ ++ .owner = THIS_MODULE, ++ .unlocked_ioctl = ast2500_espi_vw_ioctl, ++}; ++ ++static void ast2500_espi_vw_isr(struct ast2500_espi *espi) ++{ ++ struct ast2500_espi_vw *vw; ++ uint32_t reg, sts, sts_sysevt; ++ ++ vw = &espi->vw; ++ ++ sts = readl(espi->regs + ESPI_INT_STS); ++ ++ if (sts & ESPI_INT_STS_VW_SYSEVT) { ++ sts_sysevt = readl(espi->regs + ESPI_VW_SYSEVT_INT_STS); ++ ++ if (sts_sysevt & ESPI_VW_SYSEVT_INT_STS_HOST_RST_WARN) { ++ reg = readl(espi->regs + ESPI_VW_SYSEVT) | ESPI_VW_SYSEVT_HOST_RST_ACK; ++ writel(reg, espi->regs + ESPI_VW_SYSEVT); ++ writel(ESPI_VW_SYSEVT_INT_STS_HOST_RST_WARN, espi->regs + ESPI_VW_SYSEVT_INT_STS); ++ } ++ ++ if (sts_sysevt & ESPI_VW_SYSEVT_INT_STS_OOB_RST_WARN) { ++ reg = readl(espi->regs + ESPI_VW_SYSEVT) | ESPI_VW_SYSEVT_OOB_RST_ACK; ++ writel(reg, espi->regs + ESPI_VW_SYSEVT); ++ writel(ESPI_VW_SYSEVT_INT_STS_OOB_RST_WARN, espi->regs + ESPI_VW_SYSEVT_INT_STS); ++ } ++ ++ writel(ESPI_INT_STS_VW_SYSEVT, espi->regs + ESPI_INT_STS); ++ } ++ ++ if (sts & ESPI_INT_STS_VW_SYSEVT1) { ++ sts_sysevt = readl(espi->regs + ESPI_VW_SYSEVT1_INT_STS); ++ ++ if (sts_sysevt & ESPI_VW_SYSEVT1_INT_STS_SUSPEND_WARN) { ++ reg = readl(espi->regs + ESPI_VW_SYSEVT1) | ESPI_VW_SYSEVT1_SUSPEND_ACK; ++ writel(reg, espi->regs + ESPI_VW_SYSEVT1); ++ writel(ESPI_VW_SYSEVT1_INT_STS_SUSPEND_WARN, espi->regs + ESPI_VW_SYSEVT1_INT_STS); ++ } ++ ++ writel(ESPI_INT_STS_VW_SYSEVT1, espi->regs + ESPI_INT_STS); ++ } ++ ++ if (sts & ESPI_INT_STS_VW_GPIO) { ++ vw->gpio.val = readl(espi->regs + ESPI_VW_GPIO_VAL); ++ writel(ESPI_INT_STS_VW_GPIO, espi->regs + ESPI_INT_STS); ++ } ++} ++ ++static void ast2500_espi_vw_reset(struct ast2500_espi *espi) ++{ ++ uint32_t reg; ++ struct ast2500_espi_vw *vw = &espi->vw; ++ ++ reg = readl(espi->regs + ESPI_INT_EN); ++ reg &= ~(ESPI_INT_EN_VW); ++ writel(reg, espi->regs + ESPI_INT_EN); ++ writel(ESPI_INT_STS_VW, espi->regs + ESPI_INT_STS); ++ ++ vw->gpio.val = readl(espi->regs + ESPI_VW_GPIO_VAL); ++ ++ /* Host Reset Warn and OOB Reset Warn system events */ ++ reg = readl(espi->regs + ESPI_VW_SYSEVT_INT_T2) ++ | ESPI_VW_SYSEVT_INT_T2_HOST_RST_WARN ++ | ESPI_VW_SYSEVT_INT_T2_OOB_RST_WARN; ++ writel(reg, espi->regs + ESPI_VW_SYSEVT_INT_T2); ++ ++ reg = readl(espi->regs + ESPI_VW_SYSEVT_INT_EN) ++ | ESPI_VW_SYSEVT_INT_EN_HOST_RST_WARN ++ | ESPI_VW_SYSEVT_INT_EN_OOB_RST_WARN; ++ writel(reg, espi->regs + ESPI_VW_SYSEVT_INT_EN); ++ ++ /* Suspend Warn system event */ ++ reg = readl(espi->regs + ESPI_VW_SYSEVT1_INT_T0) | ESPI_VW_SYSEVT1_INT_T0_SUSPEND_WARN; ++ writel(reg, espi->regs + ESPI_VW_SYSEVT1_INT_T0); ++ ++ reg = readl(espi->regs + ESPI_VW_SYSEVT1_INT_EN) | ESPI_VW_SYSEVT1_INT_EN_SUSPEND_WARN; ++ writel(reg, espi->regs + ESPI_VW_SYSEVT1_INT_EN); ++ ++ reg = readl(espi->regs + ESPI_INT_EN) ++ | ESPI_INT_EN_VW_GPIO ++ | ESPI_INT_EN_VW_SYSEVT ++ | ESPI_INT_EN_VW_SYSEVT1; ++ writel(reg, espi->regs + ESPI_INT_EN); ++ ++ reg = readl(espi->regs + ESPI_VW_SYSEVT) ++ | ESPI_VW_SYSEVT_SLV_BOOT_STS ++ | ESPI_VW_SYSEVT_SLV_BOOT_DONE; ++ writel(reg, espi->regs + ESPI_VW_SYSEVT); ++ ++ reg = readl(espi->regs + ESPI_CTRL) ++ | ((vw->gpio.hw_mode) ? 
0 : ESPI_CTRL_VW_GPIO_SW) ++ | ESPI_CTRL_VW_SW_RDY; ++ writel(reg, espi->regs + ESPI_CTRL); ++} ++ ++static int ast2500_espi_vw_probe(struct ast2500_espi *espi) ++{ ++ int rc; ++ struct device *dev = espi->dev; ++ struct ast2500_espi_vw *vw = &espi->vw; ++ ++ writel(0x0, espi->regs + ESPI_VW_SYSEVT_INT_EN); ++ writel(0xffffffff, espi->regs + ESPI_VW_SYSEVT_INT_STS); ++ ++ writel(0x0, espi->regs + ESPI_VW_SYSEVT1_INT_EN); ++ writel(0xffffffff, espi->regs + ESPI_VW_SYSEVT1_INT_STS); ++ ++ vw->gpio.hw_mode = of_property_read_bool(dev->of_node, "vw-gpio-hw-mode"); ++ ++ vw->mdev.parent = dev; ++ vw->mdev.minor = MISC_DYNAMIC_MINOR; ++ vw->mdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s-vw", DEVICE_NAME); ++ vw->mdev.fops = &ast2500_espi_vw_fops; ++ rc = misc_register(&vw->mdev); ++ if (rc) { ++ dev_err(dev, "cannot register device %s\n", vw->mdev.name); ++ return rc; ++ } ++ ++ ast2500_espi_vw_reset(espi); ++ ++ return 0; ++} ++ ++static int ast2500_espi_vw_remove(struct ast2500_espi *espi) ++{ ++ struct ast2500_espi_vw *vw; ++ uint32_t reg; ++ ++ vw = &espi->vw; ++ ++ reg = readl(espi->regs + ESPI_INT_EN); ++ reg &= ~(ESPI_INT_EN_VW); ++ writel(reg, espi->regs + ESPI_INT_EN); ++ ++ misc_deregister(&vw->mdev); ++ ++ return 0; ++} ++ ++/* out-of-band channel (CH2) */ ++static long ast2500_espi_oob_get_rx(struct file *fp, ++ struct ast2500_espi_oob *oob, ++ struct aspeed_espi_ioc *ioc) ++{ ++ uint32_t reg, cyc, tag, len; ++ struct ast2500_espi *espi; ++ struct espi_comm_hdr *hdr; ++ unsigned long flags; ++ uint32_t pkt_len; ++ uint8_t *pkt; ++ int i, rc; ++ ++ espi = container_of(oob, struct ast2500_espi, oob); ++ ++ if (fp->f_flags & O_NONBLOCK) { ++ if (!mutex_trylock(&oob->rx_mtx)) ++ return -EAGAIN; ++ ++ if (!oob->rx_ready) { ++ rc = -ENODATA; ++ goto unlock_mtx_n_out; ++ } ++ } else { ++ mutex_lock(&oob->rx_mtx); ++ ++ if (!oob->rx_ready) { ++ rc = wait_event_interruptible(oob->wq, oob->rx_ready); ++ if (rc == -ERESTARTSYS) { ++ rc = -EINTR; ++ goto unlock_mtx_n_out; ++ } ++ } ++ } ++ ++ /* ++ * common header (i.e. cycle type, tag, and length) ++ * part is written to HW registers ++ */ ++ reg = readl(espi->regs + ESPI_OOB_RX_CTRL); ++ cyc = FIELD_GET(ESPI_OOB_RX_CTRL_CYC, reg); ++ tag = FIELD_GET(ESPI_OOB_RX_CTRL_TAG, reg); ++ len = FIELD_GET(ESPI_OOB_RX_CTRL_LEN, reg); ++ ++ /* ++ * calculate the length of the rest part of the ++ * eSPI packet to be read from HW and copied to ++ * user space. ++ */ ++ pkt_len = ((len) ? 
len : ESPI_MAX_PLD_LEN) + sizeof(struct espi_comm_hdr); ++ ++ if (ioc->pkt_len < pkt_len) { ++ rc = -EINVAL; ++ goto unlock_mtx_n_out; ++ } ++ ++ pkt = vmalloc(pkt_len); ++ if (!pkt) { ++ rc = -ENOMEM; ++ goto unlock_mtx_n_out; ++ } ++ ++ hdr = (struct espi_comm_hdr *)pkt; ++ hdr->cyc = cyc; ++ hdr->tag = tag; ++ hdr->len_h = len >> 8; ++ hdr->len_l = len & 0xff; ++ ++ if (oob->dma.enable) { ++ memcpy(hdr + 1, oob->dma.rx_virt, pkt_len - sizeof(*hdr)); ++ } else { ++ for (i = sizeof(*hdr); i < pkt_len; ++i) ++ pkt[i] = readl(espi->regs + ESPI_OOB_RX_DATA) & 0xff; ++ } ++ ++ if (copy_to_user((void __user *)ioc->pkt, pkt, pkt_len)) { ++ rc = -EFAULT; ++ goto free_n_out; ++ } ++ ++ spin_lock_irqsave(&oob->lock, flags); ++ ++ writel(ESPI_OOB_RX_CTRL_SERV_PEND, espi->regs + ESPI_OOB_RX_CTRL); ++ oob->rx_ready = 0; ++ ++ spin_unlock_irqrestore(&oob->lock, flags); ++ ++ rc = 0; ++ ++free_n_out: ++ vfree(pkt); ++ ++unlock_mtx_n_out: ++ mutex_unlock(&oob->rx_mtx); ++ ++ return rc; ++} ++ ++static long ast2500_espi_oob_put_tx(struct file *fp, ++ struct ast2500_espi_oob *oob, ++ struct aspeed_espi_ioc *ioc) ++{ ++ uint32_t reg, cyc, tag, len; ++ struct ast2500_espi *espi; ++ struct espi_comm_hdr *hdr; ++ uint8_t *pkt; ++ int i, rc; ++ ++ espi = container_of(oob, struct ast2500_espi, oob); ++ ++ if (!mutex_trylock(&oob->tx_mtx)) ++ return -EAGAIN; ++ ++ reg = readl(espi->regs + ESPI_OOB_TX_CTRL); ++ if (reg & ESPI_OOB_TX_CTRL_TRIG_PEND) { ++ rc = -EBUSY; ++ goto unlock_mtx_n_out; ++ } ++ ++ if (ioc->pkt_len > ESPI_MAX_PKT_LEN) { ++ rc = -EINVAL; ++ goto unlock_mtx_n_out; ++ } ++ ++ pkt = vmalloc(ioc->pkt_len); ++ if (!pkt) { ++ rc = -ENOMEM; ++ goto unlock_mtx_n_out; ++ } ++ ++ hdr = (struct espi_comm_hdr *)pkt; ++ ++ if (copy_from_user(pkt, (void __user *)ioc->pkt, ioc->pkt_len)) { ++ rc = -EFAULT; ++ goto free_n_out; ++ } ++ ++ /* ++ * common header (i.e. 
cycle type, tag, and length) ++ * part is written to HW registers ++ */ ++ if (oob->dma.enable) { ++ memcpy(oob->dma.tx_virt, hdr + 1, ioc->pkt_len - sizeof(*hdr)); ++ dma_wmb(); ++ } else { ++ for (i = sizeof(*hdr); i < ioc->pkt_len; ++i) ++ writel(pkt[i], espi->regs + ESPI_OOB_TX_DATA); ++ } ++ ++ cyc = hdr->cyc; ++ tag = hdr->tag; ++ len = (hdr->len_h << 8) | (hdr->len_l & 0xff); ++ ++ reg = FIELD_PREP(ESPI_OOB_TX_CTRL_CYC, cyc) ++ | FIELD_PREP(ESPI_OOB_TX_CTRL_TAG, tag) ++ | FIELD_PREP(ESPI_OOB_TX_CTRL_LEN, len) ++ | ESPI_OOB_TX_CTRL_TRIG_PEND; ++ writel(reg, espi->regs + ESPI_OOB_TX_CTRL); ++ ++ rc = 0; ++ ++free_n_out: ++ vfree(pkt); ++ ++unlock_mtx_n_out: ++ mutex_unlock(&oob->tx_mtx); ++ ++ return rc; ++} ++ ++static long ast2500_espi_oob_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) ++{ ++ struct ast2500_espi_oob *oob; ++ struct aspeed_espi_ioc ioc; ++ ++ oob = container_of(fp->private_data, struct ast2500_espi_oob, mdev); ++ ++ if (copy_from_user(&ioc, (void __user *)arg, sizeof(ioc))) ++ return -EFAULT; ++ ++ if (ioc.pkt_len > ESPI_MAX_PKT_LEN) ++ return -EINVAL; ++ ++ switch (cmd) { ++ case ASPEED_ESPI_OOB_GET_RX: ++ return ast2500_espi_oob_get_rx(fp, oob, &ioc); ++ case ASPEED_ESPI_OOB_PUT_TX: ++ return ast2500_espi_oob_put_tx(fp, oob, &ioc); ++ default: ++ break; ++ }; ++ ++ return -EINVAL; ++} ++ ++static const struct file_operations ast2500_espi_oob_fops = { ++ .owner = THIS_MODULE, ++ .unlocked_ioctl = ast2500_espi_oob_ioctl, ++}; ++ ++static void ast2500_espi_oob_isr(struct ast2500_espi *espi) ++{ ++ struct ast2500_espi_oob *oob; ++ unsigned long flags; ++ uint32_t sts; ++ ++ oob = &espi->oob; ++ ++ sts = readl(espi->regs + ESPI_INT_STS); ++ ++ if (sts & ESPI_INT_STS_OOB_RX_CMPLT) { ++ writel(ESPI_INT_STS_OOB_RX_CMPLT, espi->regs + ESPI_INT_STS); ++ ++ spin_lock_irqsave(&oob->lock, flags); ++ oob->rx_ready = true; ++ spin_unlock_irqrestore(&oob->lock, flags); ++ ++ wake_up_interruptible(&oob->wq); ++ } ++} ++ ++static void ast2500_espi_oob_reset(struct ast2500_espi *espi) ++{ ++ struct ast2500_espi_oob *oob; ++ uint32_t reg; ++ ++ oob = &espi->oob; ++ ++ reg = readl(espi->regs + ESPI_INT_EN); ++ reg &= ~(ESPI_INT_EN_OOB); ++ writel(reg, espi->regs + ESPI_INT_EN); ++ writel(ESPI_INT_STS_OOB, espi->regs + ESPI_INT_STS); ++ ++ reg = readl(espi->regs + ESPI_CTRL); ++ reg &= ~(ESPI_CTRL_OOB_TX_SW_RST ++ | ESPI_CTRL_OOB_RX_SW_RST ++ | ESPI_CTRL_OOB_TX_DMA_EN ++ | ESPI_CTRL_OOB_RX_DMA_EN ++ | ESPI_CTRL_OOB_SW_RDY); ++ writel(reg, espi->regs + ESPI_CTRL); ++ ++ udelay(1); ++ ++ reg |= (ESPI_CTRL_OOB_TX_SW_RST | ESPI_CTRL_OOB_RX_SW_RST); ++ writel(reg, espi->regs + ESPI_CTRL); ++ ++ if (oob->dma.enable) { ++ writel(oob->dma.tx_addr, espi->regs + ESPI_OOB_TX_DMA); ++ writel(oob->dma.rx_addr, espi->regs + ESPI_OOB_RX_DMA); ++ ++ reg = readl(espi->regs + ESPI_CTRL) ++ | ESPI_CTRL_OOB_TX_DMA_EN ++ | ESPI_CTRL_OOB_RX_DMA_EN; ++ writel(reg, espi->regs + ESPI_CTRL); ++ } ++ ++ reg = readl(espi->regs + ESPI_INT_EN) | ESPI_INT_EN_OOB_RX_CMPLT; ++ writel(reg, espi->regs + ESPI_INT_EN); ++ ++ reg = readl(espi->regs + ESPI_CTRL) | ESPI_CTRL_OOB_SW_RDY; ++ writel(reg, espi->regs + ESPI_CTRL); ++} ++ ++static int ast2500_espi_oob_probe(struct ast2500_espi *espi) ++{ ++ struct ast2500_espi_oob *oob; ++ struct device *dev; ++ int rc; ++ ++ dev = espi->dev; ++ ++ oob = &espi->oob; ++ ++ init_waitqueue_head(&oob->wq); ++ ++ spin_lock_init(&oob->lock); ++ ++ mutex_init(&oob->tx_mtx); ++ mutex_init(&oob->rx_mtx); ++ ++ oob->dma.enable = of_property_read_bool(dev->of_node, 
"oob-dma-mode"); ++ if (oob->dma.enable) { ++ oob->dma.tx_virt = dmam_alloc_coherent(dev, PAGE_SIZE, &oob->dma.tx_addr, GFP_KERNEL); ++ if (!oob->dma.tx_virt) { ++ dev_err(dev, "cannot allocate DMA TX buffer\n"); ++ return -ENOMEM; ++ } ++ ++ oob->dma.rx_virt = dmam_alloc_coherent(dev, PAGE_SIZE, &oob->dma.rx_addr, GFP_KERNEL); ++ if (!oob->dma.rx_virt) { ++ dev_err(dev, "cannot allocate DMA TX buffer\n"); ++ return -ENOMEM; ++ } ++ } ++ ++ oob->mdev.parent = dev; ++ oob->mdev.minor = MISC_DYNAMIC_MINOR; ++ oob->mdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s-oob", DEVICE_NAME); ++ oob->mdev.fops = &ast2500_espi_oob_fops; ++ rc = misc_register(&oob->mdev); ++ if (rc) { ++ dev_err(dev, "cannot register device %s\n", oob->mdev.name); ++ return rc; ++ } ++ ++ ast2500_espi_oob_reset(espi); ++ ++ return 0; ++} ++ ++static int ast2500_espi_oob_remove(struct ast2500_espi *espi) ++{ ++ struct ast2500_espi_oob *oob; ++ struct device *dev; ++ uint32_t reg; ++ ++ dev = espi->dev; ++ ++ oob = &espi->oob; ++ ++ reg = readl(espi->regs + ESPI_INT_EN); ++ reg &= ~(ESPI_INT_EN_OOB); ++ writel(reg, espi->regs + ESPI_INT_EN); ++ ++ reg = readl(espi->regs + ESPI_CTRL); ++ reg &= ~(ESPI_CTRL_OOB_TX_DMA_EN ++ | ESPI_CTRL_OOB_RX_DMA_EN ++ | ESPI_CTRL_OOB_SW_RDY); ++ writel(reg, espi->regs + ESPI_CTRL); ++ ++ if (oob->dma.enable) { ++ dmam_free_coherent(dev, PAGE_SIZE, oob->dma.tx_virt, oob->dma.tx_addr); ++ dmam_free_coherent(dev, PAGE_SIZE, oob->dma.rx_virt, oob->dma.rx_addr); ++ } ++ ++ mutex_destroy(&oob->tx_mtx); ++ mutex_destroy(&oob->rx_mtx); ++ ++ misc_deregister(&oob->mdev); ++ ++ return 0; ++} ++ ++/* flash channel (CH3) */ ++static long ast2500_espi_flash_get_rx(struct file *fp, ++ struct ast2500_espi_flash *flash, ++ struct aspeed_espi_ioc *ioc) ++{ ++ uint32_t reg, cyc, tag, len; ++ struct ast2500_espi *espi; ++ struct espi_comm_hdr *hdr; ++ unsigned long flags; ++ uint32_t pkt_len; ++ uint8_t *pkt; ++ int i, rc; ++ ++ rc = 0; ++ ++ espi = container_of(flash, struct ast2500_espi, flash); ++ ++ if (fp->f_flags & O_NONBLOCK) { ++ if (!mutex_trylock(&flash->rx_mtx)) ++ return -EAGAIN; ++ ++ if (!flash->rx_ready) { ++ rc = -ENODATA; ++ goto unlock_mtx_n_out; ++ } ++ } else { ++ mutex_lock(&flash->rx_mtx); ++ ++ if (!flash->rx_ready) { ++ rc = wait_event_interruptible(flash->wq, flash->rx_ready); ++ if (rc == -ERESTARTSYS) { ++ rc = -EINTR; ++ goto unlock_mtx_n_out; ++ } ++ } ++ } ++ ++ /* ++ * common header (i.e. cycle type, tag, and length) ++ * part is written to HW registers ++ */ ++ reg = readl(espi->regs + ESPI_FLASH_RX_CTRL); ++ cyc = FIELD_GET(ESPI_FLASH_RX_CTRL_CYC, reg); ++ tag = FIELD_GET(ESPI_FLASH_RX_CTRL_TAG, reg); ++ len = FIELD_GET(ESPI_FLASH_RX_CTRL_LEN, reg); ++ ++ /* ++ * calculate the length of the rest part of the ++ * eSPI packet to be read from HW and copied to ++ * user space. ++ */ ++ switch (cyc) { ++ case ESPI_FLASH_WRITE: ++ pkt_len = ((len) ? len : ESPI_MAX_PLD_LEN) + ++ sizeof(struct espi_flash_rwe); ++ break; ++ case ESPI_FLASH_READ: ++ case ESPI_FLASH_ERASE: ++ pkt_len = sizeof(struct espi_flash_rwe); ++ break; ++ case ESPI_FLASH_SUC_CMPLT_D_MIDDLE: ++ case ESPI_FLASH_SUC_CMPLT_D_FIRST: ++ case ESPI_FLASH_SUC_CMPLT_D_LAST: ++ case ESPI_FLASH_SUC_CMPLT_D_ONLY: ++ pkt_len = ((len) ? 
len : ESPI_MAX_PLD_LEN) + ++ sizeof(struct espi_flash_cmplt); ++ break; ++ case ESPI_FLASH_SUC_CMPLT: ++ case ESPI_FLASH_UNSUC_CMPLT: ++ pkt_len = sizeof(struct espi_flash_cmplt); ++ break; ++ default: ++ rc = -EFAULT; ++ goto unlock_mtx_n_out; ++ } ++ ++ if (ioc->pkt_len < pkt_len) { ++ rc = -EINVAL; ++ goto unlock_mtx_n_out; ++ } ++ ++ pkt = vmalloc(pkt_len); ++ if (!pkt) { ++ rc = -ENOMEM; ++ goto unlock_mtx_n_out; ++ } ++ ++ hdr = (struct espi_comm_hdr *)pkt; ++ hdr->cyc = cyc; ++ hdr->tag = tag; ++ hdr->len_h = len >> 8; ++ hdr->len_l = len & 0xff; ++ ++ if (flash->dma.enable) { ++ memcpy(hdr + 1, flash->dma.rx_virt, pkt_len - sizeof(*hdr)); ++ } else { ++ for (i = sizeof(*hdr); i < pkt_len; ++i) ++ pkt[i] = readl(espi->regs + ESPI_FLASH_RX_DATA) & 0xff; ++ } ++ ++ if (copy_to_user((void __user *)ioc->pkt, pkt, pkt_len)) { ++ rc = -EFAULT; ++ goto free_n_out; ++ } ++ ++ spin_lock_irqsave(&flash->lock, flags); ++ ++ writel(ESPI_FLASH_RX_CTRL_SERV_PEND, espi->regs + ESPI_FLASH_RX_CTRL); ++ flash->rx_ready = 0; ++ ++ spin_unlock_irqrestore(&flash->lock, flags); ++ ++ rc = 0; ++ ++free_n_out: ++ vfree(pkt); ++ ++unlock_mtx_n_out: ++ mutex_unlock(&flash->rx_mtx); ++ ++ return rc; ++} ++ ++static long ast2500_espi_flash_put_tx(struct file *fp, ++ struct ast2500_espi_flash *flash, ++ struct aspeed_espi_ioc *ioc) ++{ ++ uint32_t reg, cyc, tag, len; ++ struct ast2500_espi *espi; ++ struct espi_comm_hdr *hdr; ++ uint8_t *pkt; ++ int i, rc; ++ ++ espi = container_of(flash, struct ast2500_espi, flash); ++ ++ if (!mutex_trylock(&flash->tx_mtx)) ++ return -EAGAIN; ++ ++ reg = readl(espi->regs + ESPI_FLASH_TX_CTRL); ++ if (reg & ESPI_FLASH_TX_CTRL_TRIG_PEND) { ++ rc = -EBUSY; ++ goto unlock_mtx_n_out; ++ } ++ ++ pkt = vmalloc(ioc->pkt_len); ++ if (!pkt) { ++ rc = -ENOMEM; ++ goto unlock_mtx_n_out; ++ } ++ ++ hdr = (struct espi_comm_hdr *)pkt; ++ ++ if (copy_from_user(pkt, (void __user *)ioc->pkt, ioc->pkt_len)) { ++ rc = -EFAULT; ++ goto free_n_out; ++ } ++ ++ /* ++ * common header (i.e. 
cycle type, tag, and length) ++ * part is written to HW registers ++ */ ++ if (flash->dma.enable) { ++ memcpy(flash->dma.tx_virt, hdr + 1, ioc->pkt_len - sizeof(*hdr)); ++ dma_wmb(); ++ } else { ++ for (i = sizeof(*hdr); i < ioc->pkt_len; ++i) ++ writel(pkt[i], espi->regs + ESPI_FLASH_TX_DATA); ++ } ++ ++ cyc = hdr->cyc; ++ tag = hdr->tag; ++ len = (hdr->len_h << 8) | (hdr->len_l & 0xff); ++ ++ reg = FIELD_PREP(ESPI_FLASH_TX_CTRL_CYC, cyc) ++ | FIELD_PREP(ESPI_FLASH_TX_CTRL_TAG, tag) ++ | FIELD_PREP(ESPI_FLASH_TX_CTRL_LEN, len) ++ | ESPI_FLASH_TX_CTRL_TRIG_PEND; ++ writel(reg, espi->regs + ESPI_FLASH_TX_CTRL); ++ ++ rc = 0; ++ ++free_n_out: ++ vfree(pkt); ++ ++unlock_mtx_n_out: ++ mutex_unlock(&flash->tx_mtx); ++ ++ return rc; ++} ++ ++static long ast2500_espi_flash_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) ++{ ++ struct ast2500_espi_flash *flash; ++ struct aspeed_espi_ioc ioc; ++ ++ flash = container_of(fp->private_data, struct ast2500_espi_flash, mdev); ++ ++ if (copy_from_user(&ioc, (void __user *)arg, sizeof(ioc))) ++ return -EFAULT; ++ ++ if (ioc.pkt_len > ESPI_MAX_PKT_LEN) ++ return -EINVAL; ++ ++ switch (cmd) { ++ case ASPEED_ESPI_FLASH_GET_RX: ++ return ast2500_espi_flash_get_rx(fp, flash, &ioc); ++ case ASPEED_ESPI_FLASH_PUT_TX: ++ return ast2500_espi_flash_put_tx(fp, flash, &ioc); ++ default: ++ break; ++ }; ++ ++ return -EINVAL; ++} ++ ++static const struct file_operations ast2500_espi_flash_fops = { ++ .owner = THIS_MODULE, ++ .unlocked_ioctl = ast2500_espi_flash_ioctl, ++}; ++ ++static void ast2500_espi_flash_isr(struct ast2500_espi *espi) ++{ ++ struct ast2500_espi_flash *flash; ++ unsigned long flags; ++ uint32_t sts; ++ ++ flash = &espi->flash; ++ ++ sts = readl(espi->regs + ESPI_INT_STS); ++ ++ if (sts & ESPI_INT_STS_FLASH_RX_CMPLT) { ++ spin_lock_irqsave(&flash->lock, flags); ++ flash->rx_ready = true; ++ spin_unlock_irqrestore(&flash->lock, flags); ++ ++ wake_up_interruptible(&flash->wq); ++ ++ writel(ESPI_INT_STS_FLASH_RX_CMPLT, espi->regs + ESPI_INT_STS); ++ } ++} ++ ++static void ast2500_espi_flash_reset(struct ast2500_espi *espi) ++{ ++ struct ast2500_espi_flash *flash = &espi->flash; ++ uint32_t reg; ++ ++ reg = readl(espi->regs + ESPI_INT_EN); ++ reg &= ~(ESPI_INT_EN_FLASH); ++ writel(reg, espi->regs + ESPI_INT_EN); ++ writel(ESPI_INT_STS_FLASH, espi->regs + ESPI_INT_STS); ++ ++ reg = readl(espi->regs + ESPI_CTRL); ++ reg &= ~(ESPI_CTRL_FLASH_TX_SW_RST ++ | ESPI_CTRL_FLASH_RX_SW_RST ++ | ESPI_CTRL_FLASH_TX_DMA_EN ++ | ESPI_CTRL_FLASH_RX_DMA_EN ++ | ESPI_CTRL_FLASH_SW_RDY); ++ writel(reg, espi->regs + ESPI_CTRL); ++ ++ udelay(1); ++ ++ reg |= (ESPI_CTRL_FLASH_TX_SW_RST | ESPI_CTRL_FLASH_RX_SW_RST); ++ writel(reg, espi->regs + ESPI_CTRL); ++ ++ if (flash->safs.mode == SAFS_MODE_MIX) { ++ reg = FIELD_PREP(ESPI_FLASH_SAFS_TADDR_BASE, flash->safs.taddr >> 24) ++ | FIELD_PREP(ESPI_FLASH_SAFS_TADDR_MASK, (~(flash->safs.size - 1)) >> 24); ++ writel(reg, espi->regs + ESPI_FLASH_SAFS_TADDR); ++ } else { ++ reg = readl(espi->regs + ESPI_CTRL) | ESPI_CTRL_FLASH_SAFS_SW_MODE; ++ writel(reg, espi->regs + ESPI_CTRL); ++ } ++ ++ if (flash->dma.enable) { ++ writel(flash->dma.tx_addr, espi->regs + ESPI_FLASH_TX_DMA); ++ writel(flash->dma.rx_addr, espi->regs + ESPI_FLASH_RX_DMA); ++ ++ reg = readl(espi->regs + ESPI_CTRL) ++ | ESPI_CTRL_FLASH_TX_DMA_EN ++ | ESPI_CTRL_FLASH_RX_DMA_EN; ++ writel(reg, espi->regs + ESPI_CTRL); ++ } ++ ++ reg = readl(espi->regs + ESPI_INT_EN) | ESPI_INT_EN_FLASH_RX_CMPLT; ++ writel(reg, espi->regs + ESPI_INT_EN); ++ ++ reg = 
readl(espi->regs + ESPI_CTRL) | ESPI_CTRL_FLASH_SW_RDY; ++ writel(reg, espi->regs + ESPI_CTRL); ++} ++ ++static int ast2500_espi_flash_probe(struct ast2500_espi *espi) ++{ ++ struct ast2500_espi_flash *flash; ++ struct device *dev; ++ int rc; ++ ++ dev = espi->dev; ++ ++ flash = &espi->flash; ++ ++ init_waitqueue_head(&flash->wq); ++ ++ spin_lock_init(&flash->lock); ++ ++ mutex_init(&flash->tx_mtx); ++ mutex_init(&flash->rx_mtx); ++ ++ flash->safs.mode = SAFS_MODE_MIX; ++ ++ of_property_read_u32(dev->of_node, "flash-safs-mode", &flash->safs.mode); ++ if (flash->safs.mode == SAFS_MODE_MIX) { ++ rc = of_property_read_u32(dev->of_node, "flash-safs-tgt-addr", &flash->safs.taddr); ++ if (rc || !IS_ALIGNED(flash->safs.taddr, FLASH_SAFS_ALIGN)) { ++ dev_err(dev, "cannot get 16MB-aligned SAFS target address\n"); ++ return -ENODEV; ++ } ++ ++ rc = of_property_read_u32(dev->of_node, "flash-safs-size", &flash->safs.size); ++ if (rc || !IS_ALIGNED(flash->safs.size, FLASH_SAFS_ALIGN)) { ++ dev_err(dev, "cannot get 16MB-aligned SAFS size\n"); ++ return -ENODEV; ++ } ++ } ++ ++ flash->dma.enable = of_property_read_bool(dev->of_node, "flash-dma-mode"); ++ if (flash->dma.enable) { ++ flash->dma.tx_virt = dmam_alloc_coherent(dev, PAGE_SIZE, &flash->dma.tx_addr, GFP_KERNEL); ++ if (!flash->dma.tx_virt) { ++ dev_err(dev, "cannot allocate DMA TX buffer\n"); ++ return -ENOMEM; ++ } ++ ++ flash->dma.rx_virt = dmam_alloc_coherent(dev, PAGE_SIZE, &flash->dma.rx_addr, GFP_KERNEL); ++ if (!flash->dma.rx_virt) { ++ dev_err(dev, "cannot allocate DMA RX buffer\n"); ++ return -ENOMEM; ++ } ++ } ++ ++ flash->mdev.parent = dev; ++ flash->mdev.minor = MISC_DYNAMIC_MINOR; ++ flash->mdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s-flash", DEVICE_NAME); ++ flash->mdev.fops = &ast2500_espi_flash_fops; ++ rc = misc_register(&flash->mdev); ++ if (rc) { ++ dev_err(dev, "cannot register device %s\n", flash->mdev.name); ++ return rc; ++ } ++ ++ ast2500_espi_flash_reset(espi); ++ ++ return 0; ++} ++ ++static int ast2500_espi_flash_remove(struct ast2500_espi *espi) ++{ ++ struct ast2500_espi_flash *flash; ++ struct device *dev; ++ uint32_t reg; ++ ++ dev = espi->dev; ++ ++ flash = &espi->flash; ++ ++ reg = readl(espi->regs + ESPI_INT_EN); ++ reg &= ~(ESPI_INT_EN_FLASH); ++ writel(reg, espi->regs + ESPI_INT_EN); ++ ++ reg = readl(espi->regs + ESPI_CTRL); ++ reg &= ~(ESPI_CTRL_FLASH_TX_DMA_EN ++ | ESPI_CTRL_FLASH_RX_DMA_EN ++ | ESPI_CTRL_FLASH_SW_RDY); ++ writel(reg, espi->regs + ESPI_CTRL); ++ ++ if (flash->dma.enable) { ++ dmam_free_coherent(dev, PAGE_SIZE, flash->dma.tx_virt, flash->dma.tx_addr); ++ dmam_free_coherent(dev, PAGE_SIZE, flash->dma.rx_virt, flash->dma.rx_addr); ++ } ++ ++ mutex_destroy(&flash->tx_mtx); ++ mutex_destroy(&flash->rx_mtx); ++ ++ misc_deregister(&flash->mdev); ++ ++ return 0; ++} ++ ++/* global control */ ++static irqreturn_t ast2500_espi_isr(int irq, void *arg) ++{ ++ struct ast2500_espi *espi; ++ uint32_t sts; ++ ++ espi = (struct ast2500_espi *)arg; ++ ++ sts = readl(espi->regs + ESPI_INT_STS); ++ if (!sts) ++ return IRQ_NONE; ++ ++ if (sts & ESPI_INT_STS_PERIF) ++ ast2500_espi_perif_isr(espi); ++ ++ if (sts & ESPI_INT_STS_VW) ++ ast2500_espi_vw_isr(espi); ++ ++ if (sts & ESPI_INT_STS_OOB) ++ ast2500_espi_oob_isr(espi); ++ ++ if (sts & ESPI_INT_STS_FLASH) ++ ast2500_espi_flash_isr(espi); ++ ++ if (sts & ESPI_INT_STS_RST_DEASSERT) { ++ ast2500_espi_perif_reset(espi); ++ ast2500_espi_vw_reset(espi); ++ ast2500_espi_oob_reset(espi); ++ ast2500_espi_flash_reset(espi); ++ writel(ESPI_INT_STS_RST_DEASSERT, 
espi->regs + ESPI_INT_STS); ++ } ++ ++ return IRQ_HANDLED; ++} ++ ++static int ast2500_espi_probe(struct platform_device *pdev) ++{ ++ struct ast2500_espi *espi; ++ struct resource *res; ++ struct device *dev; ++ uint32_t reg; ++ int rc; ++ ++ dev = &pdev->dev; ++ ++ espi = devm_kzalloc(dev, sizeof(*espi), GFP_KERNEL); ++ if (!espi) ++ return -ENOMEM; ++ ++ espi->dev = dev; ++ ++ rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); ++ if (rc) { ++ dev_err(dev, "cannot set 64-bits DMA mask\n"); ++ return rc; ++ } ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (!res) { ++ dev_err(dev, "cannot get resource\n"); ++ return -ENODEV; ++ } ++ ++ espi->regs = devm_ioremap_resource(dev, res); ++ if (IS_ERR(espi->regs)) { ++ dev_err(dev, "cannot map registers\n"); ++ return PTR_ERR(espi->regs); ++ } ++ ++ espi->irq = platform_get_irq(pdev, 0); ++ if (espi->irq < 0) { ++ dev_err(dev, "cannot get IRQ number\n"); ++ return -ENODEV; ++ } ++ ++ espi->clk = devm_clk_get(dev, NULL); ++ if (IS_ERR(espi->clk)) { ++ dev_err(dev, "cannot get clock control\n"); ++ return PTR_ERR(espi->clk); ++ } ++ ++ rc = clk_prepare_enable(espi->clk); ++ if (rc) { ++ dev_err(dev, "cannot enable clocks\n"); ++ return rc; ++ } ++ ++ reg = readl(espi->regs + ESPI_INT_EN); ++ reg &= ~ESPI_INT_EN_RST_DEASSERT; ++ writel(reg, espi->regs + ESPI_INT_EN); ++ ++ rc = ast2500_espi_perif_probe(espi); ++ if (rc) { ++ dev_err(dev, "cannot init peripheral channel, rc=%d\n", rc); ++ return rc; ++ } ++ ++ rc = ast2500_espi_vw_probe(espi); ++ if (rc) { ++ dev_err(dev, "cannot init vw channel, rc=%d\n", rc); ++ goto err_remove_perif; ++ } ++ ++ rc = ast2500_espi_oob_probe(espi); ++ if (rc) { ++ dev_err(dev, "cannot init oob channel, rc=%d\n", rc); ++ goto err_remove_vw; ++ } ++ ++ rc = ast2500_espi_flash_probe(espi); ++ if (rc) { ++ dev_err(dev, "cannot init flash channel, rc=%d\n", rc); ++ goto err_remove_oob; ++ } ++ ++ rc = devm_request_irq(dev, espi->irq, ast2500_espi_isr, 0, dev_name(dev), espi); ++ if (rc) { ++ dev_err(dev, "cannot request IRQ\n"); ++ goto err_remove_flash; ++ } ++ ++ reg = readl(espi->regs + ESPI_INT_EN); ++ reg |= ESPI_INT_EN_RST_DEASSERT; ++ writel(reg, espi->regs + ESPI_INT_EN); ++ ++ dev_set_drvdata(dev, espi); ++ ++ dev_info(dev, "module loaded\n"); ++ ++ return 0; ++ ++err_remove_flash: ++ ast2500_espi_flash_remove(espi); ++err_remove_oob: ++ ast2500_espi_oob_remove(espi); ++err_remove_vw: ++ ast2500_espi_vw_remove(espi); ++err_remove_perif: ++ ast2500_espi_perif_remove(espi); ++ ++ return rc; ++} ++ ++static int ast2500_espi_remove(struct platform_device *pdev) ++{ ++ struct ast2500_espi *espi; ++ struct device *dev; ++ uint32_t reg; ++ int rc; ++ ++ dev = &pdev->dev; ++ ++ espi = (struct ast2500_espi *)dev_get_drvdata(dev); ++ ++ reg = readl(espi->regs + ESPI_INT_EN); ++ reg &= ~(ESPI_INT_EN_RST_DEASSERT); ++ writel(reg, espi->regs + ESPI_INT_EN); ++ ++ rc = ast2500_espi_perif_remove(espi); ++ if (rc) ++ dev_warn(dev, "cannot remove peripheral channel, rc=%d\n", rc); ++ ++ rc = ast2500_espi_vw_remove(espi); ++ if (rc) ++ dev_warn(dev, "cannot remove peripheral channel, rc=%d\n", rc); ++ ++ rc = ast2500_espi_oob_remove(espi); ++ if (rc) ++ dev_warn(dev, "cannot remove peripheral channel, rc=%d\n", rc); ++ ++ rc = ast2500_espi_flash_remove(espi); ++ if (rc) ++ dev_warn(dev, "cannot remove peripheral channel, rc=%d\n", rc); ++ ++ return 0; ++} ++ ++static const struct of_device_id ast2500_espi_of_matches[] = { ++ { .compatible = "aspeed,ast2500-espi" }, ++ { }, ++}; ++ ++static struct 
platform_driver ast2500_espi_driver = { ++ .driver = { ++ .name = "ast2500-espi", ++ .of_match_table = ast2500_espi_of_matches, ++ }, ++ .probe = ast2500_espi_probe, ++ .remove = ast2500_espi_remove, ++}; ++ ++module_platform_driver(ast2500_espi_driver); ++ ++MODULE_AUTHOR("Chia-Wei Wang "); ++MODULE_DESCRIPTION("Control of AST2500 eSPI Device"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/soc/aspeed/ast2500-espi.h b/drivers/soc/aspeed/ast2500-espi.h +--- a/drivers/soc/aspeed/ast2500-espi.h 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/soc/aspeed/ast2500-espi.h 2025-12-23 10:16:21.126032636 +0000 +@@ -0,0 +1,250 @@ ++/* SPDX-License-Identifier: GPL-2.0+ */ ++/* ++ * Copyright 2023 Aspeed Technology Inc. ++ */ ++#ifndef _AST2500_ESPI_H_ ++#define _AST2500_ESPI_H_ ++ ++#include ++#include "aspeed-espi-comm.h" ++ ++/* registers */ ++#define ESPI_CTRL 0x000 ++#define ESPI_CTRL_FLASH_TX_SW_RST BIT(31) ++#define ESPI_CTRL_FLASH_RX_SW_RST BIT(30) ++#define ESPI_CTRL_OOB_TX_SW_RST BIT(29) ++#define ESPI_CTRL_OOB_RX_SW_RST BIT(28) ++#define ESPI_CTRL_PERIF_NP_TX_SW_RST BIT(27) ++#define ESPI_CTRL_PERIF_NP_RX_SW_RST BIT(26) ++#define ESPI_CTRL_PERIF_PC_TX_SW_RST BIT(25) ++#define ESPI_CTRL_PERIF_PC_RX_SW_RST BIT(24) ++#define ESPI_CTRL_FLASH_TX_DMA_EN BIT(23) ++#define ESPI_CTRL_FLASH_RX_DMA_EN BIT(22) ++#define ESPI_CTRL_OOB_TX_DMA_EN BIT(21) ++#define ESPI_CTRL_OOB_RX_DMA_EN BIT(20) ++#define ESPI_CTRL_PERIF_NP_TX_DMA_EN BIT(19) ++#define ESPI_CTRL_PERIF_PC_TX_DMA_EN BIT(17) ++#define ESPI_CTRL_PERIF_PC_RX_DMA_EN BIT(16) ++#define ESPI_CTRL_FLASH_SAFS_SW_MODE BIT(10) ++#define ESPI_CTRL_VW_GPIO_SW BIT(9) ++#define ESPI_CTRL_FLASH_SW_RDY BIT(7) ++#define ESPI_CTRL_OOB_SW_RDY BIT(4) ++#define ESPI_CTRL_VW_SW_RDY BIT(3) ++#define ESPI_CTRL_PERIF_SW_RDY BIT(1) ++#define ESPI_STS 0x004 ++#define ESPI_INT_STS 0x008 ++#define ESPI_INT_STS_RST_DEASSERT BIT(31) ++#define ESPI_INT_STS_OOB_RX_TMOUT BIT(23) ++#define ESPI_INT_STS_VW_SYSEVT1 BIT(22) ++#define ESPI_INT_STS_FLASH_TX_ERR BIT(21) ++#define ESPI_INT_STS_OOB_TX_ERR BIT(20) ++#define ESPI_INT_STS_FLASH_TX_ABT BIT(19) ++#define ESPI_INT_STS_OOB_TX_ABT BIT(18) ++#define ESPI_INT_STS_PERIF_NP_TX_ABT BIT(17) ++#define ESPI_INT_STS_PERIF_PC_TX_ABT BIT(16) ++#define ESPI_INT_STS_FLASH_RX_ABT BIT(15) ++#define ESPI_INT_STS_OOB_RX_ABT BIT(14) ++#define ESPI_INT_STS_PERIF_NP_RX_ABT BIT(13) ++#define ESPI_INT_STS_PERIF_PC_RX_ABT BIT(12) ++#define ESPI_INT_STS_PERIF_NP_TX_ERR BIT(11) ++#define ESPI_INT_STS_PERIF_PC_TX_ERR BIT(10) ++#define ESPI_INT_STS_VW_GPIO BIT(9) ++#define ESPI_INT_STS_VW_SYSEVT BIT(8) ++#define ESPI_INT_STS_FLASH_TX_CMPLT BIT(7) ++#define ESPI_INT_STS_FLASH_RX_CMPLT BIT(6) ++#define ESPI_INT_STS_OOB_TX_CMPLT BIT(5) ++#define ESPI_INT_STS_OOB_RX_CMPLT BIT(4) ++#define ESPI_INT_STS_PERIF_NP_TX_CMPLT BIT(3) ++#define ESPI_INT_STS_PERIF_PC_TX_CMPLT BIT(1) ++#define ESPI_INT_STS_PERIF_PC_RX_CMPLT BIT(0) ++#define ESPI_INT_EN 0x00c ++#define ESPI_INT_EN_RST_DEASSERT BIT(31) ++#define ESPI_INT_EN_OOB_RX_TMOUT BIT(23) ++#define ESPI_INT_EN_VW_SYSEVT1 BIT(22) ++#define ESPI_INT_EN_FLASH_TX_ERR BIT(21) ++#define ESPI_INT_EN_OOB_TX_ERR BIT(20) ++#define ESPI_INT_EN_FLASH_TX_ABT BIT(19) ++#define ESPI_INT_EN_OOB_TX_ABT BIT(18) ++#define ESPI_INT_EN_PERIF_NP_TX_ABT BIT(17) ++#define ESPI_INT_EN_PERIF_PC_TX_ABT BIT(16) ++#define ESPI_INT_EN_FLASH_RX_ABT BIT(15) ++#define ESPI_INT_EN_OOB_RX_ABT BIT(14) ++#define ESPI_INT_EN_PERIF_NP_RX_ABT BIT(13) ++#define ESPI_INT_EN_PERIF_PC_RX_ABT BIT(12) ++#define ESPI_INT_EN_PERIF_NP_TX_ERR BIT(11) 
++#define ESPI_INT_EN_PERIF_PC_TX_ERR BIT(10) ++#define ESPI_INT_EN_VW_GPIO BIT(9) ++#define ESPI_INT_EN_VW_SYSEVT BIT(8) ++#define ESPI_INT_EN_FLASH_TX_CMPLT BIT(7) ++#define ESPI_INT_EN_FLASH_RX_CMPLT BIT(6) ++#define ESPI_INT_EN_OOB_TX_CMPLT BIT(5) ++#define ESPI_INT_EN_OOB_RX_CMPLT BIT(4) ++#define ESPI_INT_EN_PERIF_NP_TX_CMPLT BIT(3) ++#define ESPI_INT_EN_PERIF_PC_TX_CMPLT BIT(1) ++#define ESPI_INT_EN_PERIF_PC_RX_CMPLT BIT(0) ++#define ESPI_PERIF_PC_RX_DMA 0x010 ++#define ESPI_PERIF_PC_RX_CTRL 0x014 ++#define ESPI_PERIF_PC_RX_CTRL_SERV_PEND BIT(31) ++#define ESPI_PERIF_PC_RX_CTRL_LEN GENMASK(23, 12) ++#define ESPI_PERIF_PC_RX_CTRL_TAG GENMASK(11, 8) ++#define ESPI_PERIF_PC_RX_CTRL_CYC GENMASK(7, 0) ++#define ESPI_PERIF_PC_RX_DATA 0x018 ++#define ESPI_PERIF_PC_TX_DMA 0x020 ++#define ESPI_PERIF_PC_TX_CTRL 0x024 ++#define ESPI_PERIF_PC_TX_CTRL_TRIG_PEND BIT(31) ++#define ESPI_PERIF_PC_TX_CTRL_LEN GENMASK(23, 12) ++#define ESPI_PERIF_PC_TX_CTRL_TAG GENMASK(11, 8) ++#define ESPI_PERIF_PC_TX_CTRL_CYC GENMASK(7, 0) ++#define ESPI_PERIF_PC_TX_DATA 0x028 ++#define ESPI_PERIF_NP_TX_DMA 0x030 ++#define ESPI_PERIF_NP_TX_CTRL 0x034 ++#define ESPI_PERIF_NP_TX_CTRL_TRIG_PEND BIT(31) ++#define ESPI_PERIF_NP_TX_CTRL_LEN GENMASK(23, 12) ++#define ESPI_PERIF_NP_TX_CTRL_TAG GENMASK(11, 8) ++#define ESPI_PERIF_NP_TX_CTRL_CYC GENMASK(7, 0) ++#define ESPI_PERIF_NP_TX_DATA 0x038 ++#define ESPI_OOB_RX_DMA 0x040 ++#define ESPI_OOB_RX_CTRL 0x044 ++#define ESPI_OOB_RX_CTRL_SERV_PEND BIT(31) ++#define ESPI_OOB_RX_CTRL_LEN GENMASK(23, 12) ++#define ESPI_OOB_RX_CTRL_TAG GENMASK(11, 8) ++#define ESPI_OOB_RX_CTRL_CYC GENMASK(7, 0) ++#define ESPI_OOB_RX_DATA 0x048 ++#define ESPI_OOB_TX_DMA 0x050 ++#define ESPI_OOB_TX_CTRL 0x054 ++#define ESPI_OOB_TX_CTRL_TRIG_PEND BIT(31) ++#define ESPI_OOB_TX_CTRL_LEN GENMASK(23, 12) ++#define ESPI_OOB_TX_CTRL_TAG GENMASK(11, 8) ++#define ESPI_OOB_TX_CTRL_CYC GENMASK(7, 0) ++#define ESPI_OOB_TX_DATA 0x058 ++#define ESPI_FLASH_RX_DMA 0x060 ++#define ESPI_FLASH_RX_CTRL 0x064 ++#define ESPI_FLASH_RX_CTRL_SERV_PEND BIT(31) ++#define ESPI_FLASH_RX_CTRL_LEN GENMASK(23, 12) ++#define ESPI_FLASH_RX_CTRL_TAG GENMASK(11, 8) ++#define ESPI_FLASH_RX_CTRL_CYC GENMASK(7, 0) ++#define ESPI_FLASH_RX_DATA 0x068 ++#define ESPI_FLASH_TX_DMA 0x070 ++#define ESPI_FLASH_TX_CTRL 0x074 ++#define ESPI_FLASH_TX_CTRL_TRIG_PEND BIT(31) ++#define ESPI_FLASH_TX_CTRL_LEN GENMASK(23, 12) ++#define ESPI_FLASH_TX_CTRL_TAG GENMASK(11, 8) ++#define ESPI_FLASH_TX_CTRL_CYC GENMASK(7, 0) ++#define ESPI_FLASH_TX_DATA 0x078 ++#define ESPI_PERIF_MCYC_SADDR 0x084 ++#define ESPI_PERIF_MCYC_TADDR 0x088 ++#define ESPI_PERIF_MCYC_MASK 0x08c ++#define ESPI_FLASH_SAFS_TADDR 0x090 ++#define ESPI_FLASH_SAFS_TADDR_BASE GENMASK(31, 24) ++#define ESPI_FLASH_SAFS_TADDR_MASK GENMASK(15, 8) ++#define ESPI_VW_SYSEVT_INT_EN 0x094 ++#define ESPI_VW_SYSEVT_INT_EN_HOST_RST_WARN BIT(8) ++#define ESPI_VW_SYSEVT_INT_EN_OOB_RST_WARN BIT(6) ++#define ESPI_VW_SYSEVT 0x098 ++#define ESPI_VW_SYSEVT_HOST_RST_ACK BIT(27) ++#define ESPI_VW_SYSEVT_SLV_BOOT_STS BIT(23) ++#define ESPI_VW_SYSEVT_SLV_BOOT_DONE BIT(20) ++#define ESPI_VW_SYSEVT_OOB_RST_ACK BIT(16) ++#define ESPI_VW_SYSEVT_HOST_RST_WARN BIT(8) ++#define ESPI_VW_SYSEVT_OOB_RST_WARN BIT(6) ++#define ESPI_VW_GPIO_VAL 0x09c ++#define ESPI_GEN_CAP_N_CONF 0x0a0 ++#define ESPI_CH0_CAP_N_CONF 0x0a4 ++#define ESPI_CH1_CAP_N_CONF 0x0a8 ++#define ESPI_CH2_CAP_N_CONF 0x0ac ++#define ESPI_CH3_CAP_N_CONF 0x0b0 ++#define ESPI_CH3_CAP_N_CONF2 0x0b4 ++#define ESPI_VW_GPIO_DIR 0x0c0 ++#define ESPI_VW_GPIO_GRP 
0x0c4 ++#define ESPI_VW_SYSEVT1_INT_EN 0x100 ++#define ESPI_VW_SYSEVT1_INT_EN_SUSPEND_WARN BIT(0) ++#define ESPI_VW_SYSEVT1 0x104 ++#define ESPI_VW_SYSEVT1_SUSPEND_ACK BIT(20) ++#define ESPI_VW_SYSEVT1_SUSPEND_WARN BIT(0) ++#define ESPI_VW_SYSEVT_INT_T0 0x110 ++#define ESPI_VW_SYSEVT_INT_T1 0x114 ++#define ESPI_VW_SYSEVT_INT_T2 0x118 ++#define ESPI_VW_SYSEVT_INT_T2_HOST_RST_WARN BIT(8) ++#define ESPI_VW_SYSEVT_INT_T2_OOB_RST_WARN BIT(6) ++#define ESPI_VW_SYSEVT_INT_STS 0x11c ++#define ESPI_VW_SYSEVT_INT_STS_HOST_RST_WARN BIT(8) ++#define ESPI_VW_SYSEVT_INT_STS_OOB_RST_WARN BIT(6) ++#define ESPI_VW_SYSEVT1_INT_T0 0x120 ++#define ESPI_VW_SYSEVT1_INT_T0_SUSPEND_WARN BIT(0) ++#define ESPI_VW_SYSEVT1_INT_T1 0x124 ++#define ESPI_VW_SYSEVT1_INT_T2 0x128 ++#define ESPI_VW_SYSEVT1_INT_STS 0x12c ++#define ESPI_VW_SYSEVT1_INT_STS_SUSPEND_WARN BIT(0) ++ ++/* collect ESPI_INT_EN bits for convenience */ ++#define ESPI_INT_EN_PERIF \ ++ (ESPI_INT_EN_PERIF_NP_TX_ABT | \ ++ ESPI_INT_EN_PERIF_PC_TX_ABT | \ ++ ESPI_INT_EN_PERIF_NP_RX_ABT | \ ++ ESPI_INT_EN_PERIF_PC_RX_ABT | \ ++ ESPI_INT_EN_PERIF_NP_TX_ERR | \ ++ ESPI_INT_EN_PERIF_PC_TX_ERR | \ ++ ESPI_INT_EN_PERIF_NP_TX_CMPLT | \ ++ ESPI_INT_EN_PERIF_PC_TX_CMPLT | \ ++ ESPI_INT_EN_PERIF_PC_RX_CMPLT) ++ ++#define ESPI_INT_EN_VW \ ++ (ESPI_INT_EN_VW_SYSEVT1 | \ ++ ESPI_INT_EN_VW_GPIO | \ ++ ESPI_INT_EN_VW_SYSEVT) ++ ++#define ESPI_INT_EN_OOB \ ++ (ESPI_INT_EN_OOB_RX_TMOUT | \ ++ ESPI_INT_EN_OOB_TX_ERR | \ ++ ESPI_INT_EN_OOB_TX_ABT | \ ++ ESPI_INT_EN_OOB_RX_ABT | \ ++ ESPI_INT_EN_OOB_TX_CMPLT | \ ++ ESPI_INT_EN_OOB_RX_CMPLT) ++ ++#define ESPI_INT_EN_FLASH \ ++ (ESPI_INT_EN_FLASH_TX_ERR | \ ++ ESPI_INT_EN_FLASH_TX_ABT | \ ++ ESPI_INT_EN_FLASH_RX_ABT | \ ++ ESPI_INT_EN_FLASH_TX_CMPLT | \ ++ ESPI_INT_EN_FLASH_RX_CMPLT) ++ ++/* collect ESPI_INT_STS bits for convenience */ ++#define ESPI_INT_STS_PERIF \ ++ (ESPI_INT_STS_PERIF_NP_TX_ABT | \ ++ ESPI_INT_STS_PERIF_PC_TX_ABT | \ ++ ESPI_INT_STS_PERIF_NP_RX_ABT | \ ++ ESPI_INT_STS_PERIF_PC_RX_ABT | \ ++ ESPI_INT_STS_PERIF_NP_TX_ERR | \ ++ ESPI_INT_STS_PERIF_PC_TX_ERR | \ ++ ESPI_INT_STS_PERIF_NP_TX_CMPLT | \ ++ ESPI_INT_STS_PERIF_PC_TX_CMPLT | \ ++ ESPI_INT_STS_PERIF_PC_RX_CMPLT) ++ ++#define ESPI_INT_STS_VW \ ++ (ESPI_INT_STS_VW_SYSEVT1 | \ ++ ESPI_INT_STS_VW_GPIO | \ ++ ESPI_INT_STS_VW_SYSEVT) ++ ++#define ESPI_INT_STS_OOB \ ++ (ESPI_INT_STS_OOB_RX_TMOUT | \ ++ ESPI_INT_STS_OOB_TX_ERR | \ ++ ESPI_INT_STS_OOB_TX_ABT | \ ++ ESPI_INT_STS_OOB_RX_ABT | \ ++ ESPI_INT_STS_OOB_TX_CMPLT | \ ++ ESPI_INT_STS_OOB_RX_CMPLT) ++ ++#define ESPI_INT_STS_FLASH \ ++ (ESPI_INT_STS_FLASH_TX_ERR | \ ++ ESPI_INT_STS_FLASH_TX_ABT | \ ++ ESPI_INT_STS_FLASH_RX_ABT | \ ++ ESPI_INT_STS_FLASH_TX_CMPLT | \ ++ ESPI_INT_STS_FLASH_RX_CMPLT) ++ ++/* consistent with DTS property "flash-safs-mode" */ ++enum ast2500_safs_mode { ++ SAFS_MODE_MIX = 0x0, ++ SAFS_MODE_SW, ++ SAFS_MODES, ++}; ++ ++#endif +diff --git a/drivers/soc/aspeed/ast2600-espi.c b/drivers/soc/aspeed/ast2600-espi.c +--- a/drivers/soc/aspeed/ast2600-espi.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/soc/aspeed/ast2600-espi.c 2025-12-23 10:16:21.126032636 +0000 +@@ -0,0 +1,2188 @@ ++// SPDX-License-Identifier: GPL-2.0+ ++/* ++ * Copyright 2023 Aspeed Technology Inc. 
++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "ast2600-espi.h" ++ ++#define DEVICE_NAME "aspeed-espi" ++ ++#define PERIF_MCYC_ALIGN SZ_64K ++#define PERIF_MMBI_ALIGN SZ_64K ++#define PERIF_MMBI_INST_NUM 8 ++ ++#define OOB_DMA_RPTR_KEY 0x45538073 ++#define OOB_DMA_DESC_NUM 8 ++#define OOB_DMA_DESC_CUSTOM 0x4 ++ ++#define FLASH_SAFS_ALIGN SZ_16M ++ ++struct ast2600_espi_perif_mmbi { ++ void *b2h_virt; ++ void *h2b_virt; ++ dma_addr_t b2h_addr; ++ dma_addr_t h2b_addr; ++ struct miscdevice b2h_mdev; ++ struct miscdevice h2b_mdev; ++ bool host_rwp_update; ++ wait_queue_head_t wq; ++ struct ast2600_espi_perif *perif; ++}; ++ ++struct ast2600_espi_perif { ++ struct { ++ bool enable; ++ int irq; ++ void *virt; ++ dma_addr_t taddr; ++ uint32_t saddr; ++ uint32_t size; ++ uint32_t inst_size; ++ struct ast2600_espi_perif_mmbi inst[PERIF_MMBI_INST_NUM]; ++ } mmbi; ++ ++ struct { ++ bool enable; ++ void *virt; ++ dma_addr_t taddr; ++ uint32_t saddr; ++ uint32_t size; ++ } mcyc; ++ ++ struct { ++ bool enable; ++ void *np_tx_virt; ++ dma_addr_t np_tx_addr; ++ void *pc_tx_virt; ++ dma_addr_t pc_tx_addr; ++ void *pc_rx_virt; ++ dma_addr_t pc_rx_addr; ++ } dma; ++ ++ bool rx_ready; ++ wait_queue_head_t wq; ++ ++ spinlock_t lock; ++ struct mutex np_tx_mtx; ++ struct mutex pc_tx_mtx; ++ struct mutex pc_rx_mtx; ++ ++ struct miscdevice mdev; ++}; ++ ++struct ast2600_espi_vw { ++ struct { ++ bool hw_mode; ++ uint32_t dir; ++ uint32_t val; ++ } gpio; ++ ++ struct miscdevice mdev; ++}; ++ ++struct ast2600_espi_oob_dma_tx_desc { ++ uint32_t data_addr; ++ uint8_t cyc; ++ uint16_t tag : 4; ++ uint16_t len : 12; ++ uint8_t msg_type : 3; ++ uint8_t raz0 : 1; ++ uint8_t pec : 1; ++ uint8_t int_en : 1; ++ uint8_t pause : 1; ++ uint8_t raz1 : 1; ++ uint32_t raz2; ++ uint32_t raz3; ++} __packed; ++ ++struct ast2600_espi_oob_dma_rx_desc { ++ uint32_t data_addr; ++ uint8_t cyc; ++ uint16_t tag : 4; ++ uint16_t len : 12; ++ uint8_t raz : 7; ++ uint8_t dirty : 1; ++} __packed; ++ ++struct ast2600_espi_oob { ++ struct { ++ bool enable; ++ struct ast2600_espi_oob_dma_tx_desc *txd_virt; ++ dma_addr_t txd_addr; ++ struct ast2600_espi_oob_dma_rx_desc *rxd_virt; ++ dma_addr_t rxd_addr; ++ void *tx_virt; ++ dma_addr_t tx_addr; ++ void *rx_virt; ++ dma_addr_t rx_addr; ++ } dma; ++ ++ bool rx_ready; ++ wait_queue_head_t wq; ++ ++ spinlock_t lock; ++ struct mutex tx_mtx; ++ struct mutex rx_mtx; ++ ++ struct miscdevice mdev; ++}; ++ ++struct ast2600_espi_flash { ++ struct { ++ uint32_t mode; ++ phys_addr_t taddr; ++ uint32_t size; ++ } safs; ++ ++ struct { ++ bool enable; ++ void *tx_virt; ++ dma_addr_t tx_addr; ++ void *rx_virt; ++ dma_addr_t rx_addr; ++ } dma; ++ ++ bool rx_ready; ++ wait_queue_head_t wq; ++ ++ spinlock_t lock; ++ struct mutex rx_mtx; ++ struct mutex tx_mtx; ++ ++ struct miscdevice mdev; ++}; ++ ++struct ast2600_espi { ++ struct device *dev; ++ void __iomem *regs; ++ struct reset_control *rst; ++ struct clk *clk; ++ int irq; ++ ++ struct ast2600_espi_perif perif; ++ struct ast2600_espi_vw vw; ++ struct ast2600_espi_oob oob; ++ struct ast2600_espi_flash flash; ++}; ++ ++/* peripheral channel (CH0) */ ++static int ast2600_espi_mmbi_b2h_mmap(struct file *fp, struct vm_area_struct *vma) ++{ ++ struct ast2600_espi_perif_mmbi *mmbi; ++ struct ast2600_espi_perif *perif; ++ struct ast2600_espi *espi; ++ unsigned long vm_size; ++ pgprot_t prot; ++ ++ mmbi = 
container_of(fp->private_data, struct ast2600_espi_perif_mmbi, b2h_mdev); ++ ++ perif = mmbi->perif; ++ ++ espi = container_of(perif, struct ast2600_espi, perif); ++ ++ vm_size = vma->vm_end - vma->vm_start; ++ prot = vma->vm_page_prot; ++ ++ if (((vma->vm_pgoff << PAGE_SHIFT) + vm_size) > (SZ_4K << perif->mmbi.inst_size)) ++ return -EINVAL; ++ ++ prot = pgprot_noncached(prot); ++ ++ if (remap_pfn_range(vma, vma->vm_start, ++ (mmbi->b2h_addr >> PAGE_SHIFT) + vma->vm_pgoff, ++ vm_size, prot)) ++ return -EAGAIN; ++ ++ return 0; ++} ++ ++static int ast2600_espi_mmbi_h2b_mmap(struct file *fp, struct vm_area_struct *vma) ++{ ++ struct ast2600_espi_perif_mmbi *mmbi; ++ struct ast2600_espi_perif *perif; ++ struct ast2600_espi *espi; ++ unsigned long vm_size; ++ pgprot_t prot; ++ ++ mmbi = container_of(fp->private_data, struct ast2600_espi_perif_mmbi, h2b_mdev); ++ ++ perif = mmbi->perif; ++ ++ espi = container_of(perif, struct ast2600_espi, perif); ++ ++ vm_size = vma->vm_end - vma->vm_start; ++ prot = vma->vm_page_prot; ++ ++ if (((vma->vm_pgoff << PAGE_SHIFT) + vm_size) > (SZ_4K << perif->mmbi.inst_size)) ++ return -EINVAL; ++ ++ prot = pgprot_noncached(prot); ++ ++ if (remap_pfn_range(vma, vma->vm_start, ++ (mmbi->h2b_addr >> PAGE_SHIFT) + vma->vm_pgoff, ++ vm_size, prot)) ++ return -EAGAIN; ++ ++ return 0; ++} ++ ++static __poll_t ast2600_espi_mmbi_h2b_poll(struct file *fp, struct poll_table_struct *pt) ++{ ++ struct ast2600_espi_perif_mmbi *mmbi; ++ ++ mmbi = container_of(fp->private_data, struct ast2600_espi_perif_mmbi, h2b_mdev); ++ ++ poll_wait(fp, &mmbi->wq, pt); ++ ++ if (!mmbi->host_rwp_update) ++ return 0; ++ ++ mmbi->host_rwp_update = false; ++ ++ return EPOLLIN; ++} ++ ++static long ast2600_espi_perif_pc_get_rx(struct file *fp, ++ struct ast2600_espi_perif *perif, ++ struct aspeed_espi_ioc *ioc) ++{ ++ uint32_t reg, cyc, tag, len; ++ struct ast2600_espi *espi; ++ struct espi_comm_hdr *hdr; ++ unsigned long flags; ++ uint32_t pkt_len; ++ uint8_t *pkt; ++ int i, rc; ++ ++ espi = container_of(perif, struct ast2600_espi, perif); ++ ++ if (fp->f_flags & O_NONBLOCK) { ++ if (!mutex_trylock(&perif->pc_rx_mtx)) ++ return -EAGAIN; ++ ++ if (!perif->rx_ready) { ++ rc = -ENODATA; ++ goto unlock_mtx_n_out; ++ } ++ } else { ++ mutex_lock(&perif->pc_rx_mtx); ++ ++ if (!perif->rx_ready) { ++ rc = wait_event_interruptible(perif->wq, perif->rx_ready); ++ if (rc == -ERESTARTSYS) { ++ rc = -EINTR; ++ goto unlock_mtx_n_out; ++ } ++ } ++ } ++ ++ /* ++ * common header (i.e. cycle type, tag, and length) ++ * part is written to HW registers ++ */ ++ reg = readl(espi->regs + ESPI_PERIF_PC_RX_CTRL); ++ cyc = FIELD_GET(ESPI_PERIF_PC_RX_CTRL_CYC, reg); ++ tag = FIELD_GET(ESPI_PERIF_PC_RX_CTRL_TAG, reg); ++ len = FIELD_GET(ESPI_PERIF_PC_RX_CTRL_LEN, reg); ++ ++ /* ++ * calculate the length of the rest part of the ++ * eSPI packet to be read from HW and copied to ++ * user space. ++ */ ++ switch (cyc) { ++ case ESPI_PERIF_MSG: ++ pkt_len = sizeof(struct espi_perif_msg); ++ break; ++ case ESPI_PERIF_MSG_D: ++ pkt_len = ((len) ? len : ESPI_MAX_PLD_LEN) + ++ sizeof(struct espi_perif_msg); ++ break; ++ case ESPI_PERIF_SUC_CMPLT_D_MIDDLE: ++ case ESPI_PERIF_SUC_CMPLT_D_FIRST: ++ case ESPI_PERIF_SUC_CMPLT_D_LAST: ++ case ESPI_PERIF_SUC_CMPLT_D_ONLY: ++ pkt_len = ((len) ? 
len : ESPI_MAX_PLD_LEN) + ++ sizeof(struct espi_perif_cmplt); ++ break; ++ case ESPI_PERIF_SUC_CMPLT: ++ case ESPI_PERIF_UNSUC_CMPLT: ++ pkt_len = sizeof(struct espi_perif_cmplt); ++ break; ++ default: ++ rc = -EFAULT; ++ goto unlock_mtx_n_out; ++ } ++ ++ if (ioc->pkt_len < pkt_len) { ++ rc = -EINVAL; ++ goto unlock_mtx_n_out; ++ } ++ ++ pkt = vmalloc(pkt_len); ++ if (!pkt) { ++ rc = -ENOMEM; ++ goto unlock_mtx_n_out; ++ } ++ ++ hdr = (struct espi_comm_hdr *)pkt; ++ hdr->cyc = cyc; ++ hdr->tag = tag; ++ hdr->len_h = len >> 8; ++ hdr->len_l = len & 0xff; ++ ++ if (perif->dma.enable) { ++ memcpy(hdr + 1, perif->dma.pc_rx_virt, pkt_len - sizeof(*hdr)); ++ } else { ++ for (i = sizeof(*hdr); i < pkt_len; ++i) ++ pkt[i] = readl(espi->regs + ESPI_PERIF_PC_RX_DATA) & 0xff; ++ } ++ ++ if (copy_to_user((void __user *)ioc->pkt, pkt, pkt_len)) { ++ rc = -EFAULT; ++ goto free_n_out; ++ } ++ ++ spin_lock_irqsave(&perif->lock, flags); ++ ++ writel(ESPI_PERIF_PC_RX_CTRL_SERV_PEND, espi->regs + ESPI_PERIF_PC_RX_CTRL); ++ perif->rx_ready = 0; ++ ++ spin_unlock_irqrestore(&perif->lock, flags); ++ ++ rc = 0; ++ ++free_n_out: ++ vfree(pkt); ++ ++unlock_mtx_n_out: ++ mutex_unlock(&perif->pc_rx_mtx); ++ ++ return rc; ++} ++ ++static long ast2600_espi_perif_pc_put_tx(struct file *fp, ++ struct ast2600_espi_perif *perif, ++ struct aspeed_espi_ioc *ioc) ++{ ++ uint32_t reg, cyc, tag, len; ++ struct ast2600_espi *espi; ++ struct espi_comm_hdr *hdr; ++ uint8_t *pkt; ++ int i, rc; ++ ++ espi = container_of(perif, struct ast2600_espi, perif); ++ ++ if (!mutex_trylock(&perif->pc_tx_mtx)) ++ return -EAGAIN; ++ ++ reg = readl(espi->regs + ESPI_PERIF_PC_TX_CTRL); ++ if (reg & ESPI_PERIF_PC_TX_CTRL_TRIG_PEND) { ++ rc = -EBUSY; ++ goto unlock_n_out; ++ } ++ ++ pkt = vmalloc(ioc->pkt_len); ++ if (!pkt) { ++ rc = -ENOMEM; ++ goto unlock_n_out; ++ } ++ ++ hdr = (struct espi_comm_hdr *)pkt; ++ ++ if (copy_from_user(pkt, (void __user *)ioc->pkt, ioc->pkt_len)) { ++ rc = -EFAULT; ++ goto free_n_out; ++ } ++ ++ /* ++ * common header (i.e.
cycle type, tag, and length) ++ * part is written to HW registers ++ */ ++ if (perif->dma.enable) { ++ memcpy(perif->dma.pc_tx_virt, hdr + 1, ioc->pkt_len - sizeof(*hdr)); ++ dma_wmb(); ++ } else { ++ for (i = sizeof(*hdr); i < ioc->pkt_len; ++i) ++ writel(pkt[i], espi->regs + ESPI_PERIF_PC_TX_DATA); ++ } ++ ++ cyc = hdr->cyc; ++ tag = hdr->tag; ++ len = (hdr->len_h << 8) | (hdr->len_l & 0xff); ++ ++ reg = FIELD_PREP(ESPI_PERIF_PC_TX_CTRL_CYC, cyc) ++ | FIELD_PREP(ESPI_PERIF_PC_TX_CTRL_TAG, tag) ++ | FIELD_PREP(ESPI_PERIF_PC_TX_CTRL_LEN, len) ++ | ESPI_PERIF_PC_TX_CTRL_TRIG_PEND; ++ writel(reg, espi->regs + ESPI_PERIF_PC_TX_CTRL); ++ ++ rc = 0; ++ ++free_n_out: ++ vfree(pkt); ++ ++unlock_n_out: ++ mutex_unlock(&perif->pc_tx_mtx); ++ ++ return rc; ++} ++ ++static long ast2600_espi_perif_np_put_tx(struct file *fp, ++ struct ast2600_espi_perif *perif, ++ struct aspeed_espi_ioc *ioc) ++{ ++ uint32_t reg, cyc, tag, len; ++ struct ast2600_espi *espi; ++ struct espi_comm_hdr *hdr; ++ uint8_t *pkt; ++ int i, rc; ++ ++ espi = container_of(perif, struct ast2600_espi, perif); ++ ++ if (!mutex_trylock(&perif->np_tx_mtx)) ++ return -EAGAIN; ++ ++ reg = readl(espi->regs + ESPI_PERIF_NP_TX_CTRL); ++ if (reg & ESPI_PERIF_NP_TX_CTRL_TRIG_PEND) { ++ rc = -EBUSY; ++ goto unlock_n_out; ++ } ++ ++ pkt = vmalloc(ioc->pkt_len); ++ if (!pkt) { ++ rc = -ENOMEM; ++ goto unlock_n_out; ++ } ++ ++ hdr = (struct espi_comm_hdr *)pkt; ++ ++ if (copy_from_user(pkt, (void __user *)ioc->pkt, ioc->pkt_len)) { ++ rc = -EFAULT; ++ goto free_n_out; ++ } ++ ++ /* ++ * common header (i.e. cycle type, tag, and length) ++ * part is written to HW registers ++ */ ++ if (perif->dma.enable) { ++ memcpy(perif->dma.np_tx_virt, hdr + 1, ioc->pkt_len - sizeof(*hdr)); ++ dma_wmb(); ++ } else { ++ for (i = sizeof(*hdr); i < ioc->pkt_len; ++i) ++ writel(pkt[i], espi->regs + ESPI_PERIF_NP_TX_DATA); ++ } ++ ++ cyc = hdr->cyc; ++ tag = hdr->tag; ++ len = (hdr->len_h << 8) | (hdr->len_l & 0xff); ++ ++ reg = FIELD_PREP(ESPI_PERIF_NP_TX_CTRL_CYC, cyc) ++ | FIELD_PREP(ESPI_PERIF_NP_TX_CTRL_TAG, tag) ++ | FIELD_PREP(ESPI_PERIF_NP_TX_CTRL_LEN, len) ++ | ESPI_PERIF_NP_TX_CTRL_TRIG_PEND; ++ writel(reg, espi->regs + ESPI_PERIF_NP_TX_CTRL); ++ ++ rc = 0; ++ ++free_n_out: ++ vfree(pkt); ++ ++unlock_n_out: ++ mutex_unlock(&perif->np_tx_mtx); ++ ++ return rc; ++} ++ ++static long ast2600_espi_perif_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) ++{ ++ struct ast2600_espi_perif *perif; ++ struct aspeed_espi_ioc ioc; ++ ++ perif = container_of(fp->private_data, struct ast2600_espi_perif, mdev); ++ ++ if (copy_from_user(&ioc, (void __user *)arg, sizeof(ioc))) ++ return -EFAULT; ++ ++ if (ioc.pkt_len > ESPI_MAX_PKT_LEN) ++ return -EINVAL; ++ ++ switch (cmd) { ++ case ASPEED_ESPI_PERIF_PC_GET_RX: ++ return ast2600_espi_perif_pc_get_rx(fp, perif, &ioc); ++ case ASPEED_ESPI_PERIF_PC_PUT_TX: ++ return ast2600_espi_perif_pc_put_tx(fp, perif, &ioc); ++ case ASPEED_ESPI_PERIF_NP_PUT_TX: ++ return ast2600_espi_perif_np_put_tx(fp, perif, &ioc); ++ default: ++ break; ++ }; ++ ++ return -EINVAL; ++} ++ ++static int ast2600_espi_perif_mmap(struct file *fp, struct vm_area_struct *vma) ++{ ++ struct ast2600_espi_perif *perif; ++ unsigned long vm_size; ++ pgprot_t vm_prot; ++ ++ perif = container_of(fp->private_data, struct ast2600_espi_perif, mdev); ++ if (!perif->mcyc.enable) ++ return -EPERM; ++ ++ vm_size = vma->vm_end - vma->vm_start; ++ vm_prot = vma->vm_page_prot; ++ ++ if (((vma->vm_pgoff << PAGE_SHIFT) + vm_size) > perif->mcyc.size) ++ return -EINVAL; ++ 
++ vm_prot = pgprot_noncached(vm_prot); ++ ++ if (remap_pfn_range(vma, vma->vm_start, ++ (perif->mcyc.taddr >> PAGE_SHIFT) + vma->vm_pgoff, ++ vm_size, vm_prot)) ++ return -EAGAIN; ++ ++ return 0; ++} ++ ++static const struct file_operations ast2600_espi_mmbi_b2h_fops = { ++ .owner = THIS_MODULE, ++ .mmap = ast2600_espi_mmbi_b2h_mmap, ++}; ++ ++static const struct file_operations ast2600_espi_mmbi_h2b_fops = { ++ .owner = THIS_MODULE, ++ .mmap = ast2600_espi_mmbi_h2b_mmap, ++ .poll = ast2600_espi_mmbi_h2b_poll, ++}; ++ ++static const struct file_operations ast2600_espi_perif_fops = { ++ .owner = THIS_MODULE, ++ .mmap = ast2600_espi_perif_mmap, ++ .unlocked_ioctl = ast2600_espi_perif_ioctl, ++}; ++ ++static irqreturn_t ast2600_espi_perif_mmbi_isr(int irq, void *arg) ++{ ++ struct ast2600_espi_perif_mmbi *mmbi; ++ struct ast2600_espi_perif *perif; ++ struct ast2600_espi *espi; ++ uint32_t sts, tmp; ++ uint32_t *p; ++ int i; ++ ++ espi = (struct ast2600_espi *)arg; ++ ++ perif = &espi->perif; ++ ++ sts = readl(espi->regs + ESPI_MMBI_INT_STS); ++ if (!sts) ++ return IRQ_NONE; ++ ++ for (i = 0, tmp = sts; i < PERIF_MMBI_INST_NUM; ++i, tmp >>= 2) { ++ if (!(tmp & 0x3)) ++ continue; ++ ++ mmbi = &perif->mmbi.inst[i]; ++ ++ p = (uint32_t *)mmbi->h2b_virt; ++ p[0] = readl(espi->regs + ESPI_MMBI_HOST_RWP(i)); ++ p[1] = readl(espi->regs + ESPI_MMBI_HOST_RWP(i) + 4); ++ ++ mmbi->host_rwp_update = true; ++ ++ wake_up_interruptible(&mmbi->wq); ++ } ++ ++ writel(sts, espi->regs + ESPI_MMBI_INT_STS); ++ ++ return IRQ_HANDLED; ++} ++ ++static void ast2600_espi_perif_isr(struct ast2600_espi *espi) ++{ ++ struct ast2600_espi_perif *perif; ++ unsigned long flags; ++ uint32_t sts; ++ ++ perif = &espi->perif; ++ ++ sts = readl(espi->regs + ESPI_INT_STS); ++ ++ if (sts & ESPI_INT_STS_PERIF_PC_RX_CMPLT) { ++ writel(ESPI_INT_STS_PERIF_PC_RX_CMPLT, espi->regs + ESPI_INT_STS); ++ ++ spin_lock_irqsave(&perif->lock, flags); ++ perif->rx_ready = true; ++ spin_unlock_irqrestore(&perif->lock, flags); ++ ++ wake_up_interruptible(&perif->wq); ++ } ++} ++ ++static void ast2600_espi_perif_sw_reset(struct ast2600_espi *espi) ++{ ++ struct device *dev; ++ uint32_t reg; ++ ++ dev = espi->dev; ++ ++ reg = readl(espi->regs + ESPI_CTRL); ++ reg &= ~(ESPI_CTRL_PERIF_NP_TX_SW_RST ++ | ESPI_CTRL_PERIF_NP_RX_SW_RST ++ | ESPI_CTRL_PERIF_PC_TX_SW_RST ++ | ESPI_CTRL_PERIF_PC_RX_SW_RST ++ | ESPI_CTRL_PERIF_NP_TX_DMA_EN ++ | ESPI_CTRL_PERIF_PC_TX_DMA_EN ++ | ESPI_CTRL_PERIF_PC_RX_DMA_EN ++ | ESPI_CTRL_PERIF_SW_RDY); ++ writel(reg, espi->regs + ESPI_CTRL); ++ ++ udelay(1); ++ ++ reg |= (ESPI_CTRL_PERIF_NP_TX_SW_RST ++ | ESPI_CTRL_PERIF_NP_RX_SW_RST ++ | ESPI_CTRL_PERIF_PC_TX_SW_RST ++ | ESPI_CTRL_PERIF_PC_RX_SW_RST); ++ writel(reg, espi->regs + ESPI_CTRL); ++} ++ ++static void ast2600_espi_perif_reset(struct ast2600_espi *espi) ++{ ++ struct ast2600_espi_perif *perif; ++ struct device *dev; ++ uint32_t reg, mask; ++ ++ dev = espi->dev; ++ ++ perif = &espi->perif; ++ ++ writel(ESPI_INT_EN_PERIF, espi->regs + ESPI_INT_EN_CLR); ++ writel(ESPI_INT_STS_PERIF, espi->regs + ESPI_INT_STS); ++ ++ writel(0x0, espi->regs + ESPI_MMBI_INT_EN); ++ writel(0xffffffff, espi->regs + ESPI_MMBI_INT_STS); ++ ++ reg = readl(espi->regs + ESPI_CTRL2); ++ reg &= ~(ESPI_CTRL2_MCYC_RD_DIS_WDT | ESPI_CTRL2_MCYC_WR_DIS_WDT); ++ writel(reg, espi->regs + ESPI_CTRL2); ++ ++ reg = readl(espi->regs + ESPI_CTRL); ++ reg &= ~(ESPI_CTRL_PERIF_NP_TX_DMA_EN ++ | ESPI_CTRL_PERIF_PC_TX_DMA_EN ++ | ESPI_CTRL_PERIF_PC_RX_DMA_EN ++ | ESPI_CTRL_PERIF_SW_RDY); ++ writel(reg, espi->regs 
+ ESPI_CTRL); ++ ++ if (perif->mmbi.enable) { ++ reg = readl(espi->regs + ESPI_MMBI_CTRL); ++ reg &= ~(ESPI_MMBI_CTRL_EN); ++ writel(reg, espi->regs + ESPI_MMBI_CTRL); ++ ++ mask = ~(perif->mmbi.size - 1); ++ writel(mask, espi->regs + ESPI_PERIF_MMBI_MASK); ++ writel(perif->mmbi.saddr, espi->regs + ESPI_PERIF_MMBI_SADDR); ++ writel(perif->mmbi.taddr, espi->regs + ESPI_PERIF_MMBI_TADDR); ++ ++ writel(0xffffffff, espi->regs + ESPI_MMBI_INT_EN); ++ ++ reg = FIELD_PREP(ESPI_MMBI_CTRL_INST_SZ, perif->mmbi.inst_size) ++ | FIELD_PREP(ESPI_MMBI_CTRL_TOTAL_SZ, perif->mmbi.inst_size) ++ | ESPI_MMBI_CTRL_EN; ++ writel(reg, espi->regs + ESPI_MMBI_CTRL); ++ ++ reg = readl(espi->regs + ESPI_CTRL2) & ~(ESPI_CTRL2_MMBI_RD_DIS | ESPI_CTRL2_MMBI_WR_DIS); ++ writel(reg, espi->regs + ESPI_CTRL2); ++ } ++ ++ if (perif->mcyc.enable) { ++ mask = ~(perif->mcyc.size - 1); ++ writel(mask, espi->regs + ESPI_PERIF_MCYC_MASK); ++ writel(perif->mcyc.saddr, espi->regs + ESPI_PERIF_MCYC_SADDR); ++ writel(perif->mcyc.taddr, espi->regs + ESPI_PERIF_MCYC_TADDR); ++ ++ reg = readl(espi->regs + ESPI_CTRL2) & ~(ESPI_CTRL2_MCYC_RD_DIS | ESPI_CTRL2_MCYC_WR_DIS); ++ writel(reg, espi->regs + ESPI_CTRL2); ++ } ++ ++ if (perif->dma.enable) { ++ writel(perif->dma.np_tx_addr, espi->regs + ESPI_PERIF_NP_TX_DMA); ++ writel(perif->dma.pc_tx_addr, espi->regs + ESPI_PERIF_PC_TX_DMA); ++ writel(perif->dma.pc_rx_addr, espi->regs + ESPI_PERIF_PC_RX_DMA); ++ ++ reg = readl(espi->regs + ESPI_CTRL) ++ | ESPI_CTRL_PERIF_NP_TX_DMA_EN ++ | ESPI_CTRL_PERIF_PC_TX_DMA_EN ++ | ESPI_CTRL_PERIF_PC_RX_DMA_EN; ++ writel(reg, espi->regs + ESPI_CTRL); ++ } ++ ++ writel(ESPI_INT_EN_PERIF_PC_RX_CMPLT, espi->regs + ESPI_INT_EN); ++ ++ reg = readl(espi->regs + ESPI_CTRL) | ESPI_CTRL_PERIF_SW_RDY; ++ writel(reg, espi->regs + ESPI_CTRL); ++} ++ ++static int ast2600_espi_perif_probe(struct ast2600_espi *espi) ++{ ++ struct ast2600_espi_perif_mmbi *mmbi; ++ struct ast2600_espi_perif *perif; ++ struct platform_device *pdev; ++ struct device *dev; ++ int i, rc; ++ ++ dev = espi->dev; ++ ++ perif = &espi->perif; ++ ++ init_waitqueue_head(&perif->wq); ++ ++ spin_lock_init(&perif->lock); ++ ++ mutex_init(&perif->np_tx_mtx); ++ mutex_init(&perif->pc_tx_mtx); ++ mutex_init(&perif->pc_rx_mtx); ++ ++ perif->mmbi.enable = of_property_read_bool(dev->of_node, "perif-mmbi-enable"); ++ if (perif->mmbi.enable) { ++ pdev = container_of(dev, struct platform_device, dev); ++ ++ perif->mmbi.irq = platform_get_irq(pdev, 1); ++ if (perif->mmbi.irq < 0) { ++ dev_err(dev, "cannot get MMBI IRQ number\n"); ++ return -ENODEV; ++ } ++ ++ rc = of_property_read_u32(dev->of_node, "perif-mmbi-src-addr", &perif->mmbi.saddr); ++ if (rc || !IS_ALIGNED(perif->mmbi.saddr, PERIF_MMBI_ALIGN)) { ++ dev_err(dev, "cannot get 64KB-aligned MMBI host address\n"); ++ return -ENODEV; ++ } ++ ++ rc = of_property_read_u32(dev->of_node, "perif-mmbi-instance-size", &perif->mmbi.inst_size); ++ if (rc || perif->mmbi.inst_size >= MMBI_INST_SIZE_TYPES) { ++ dev_err(dev, "cannot get valid MMBI instance size\n"); ++ return -EINVAL; ++ } ++ ++ perif->mmbi.size = (SZ_8K << perif->mmbi.inst_size) * PERIF_MMBI_INST_NUM; ++ perif->mmbi.virt = dmam_alloc_coherent(dev, perif->mmbi.size, ++ &perif->mmbi.taddr, GFP_KERNEL); ++ if (!perif->mmbi.virt) { ++ dev_err(dev, "cannot allocate MMBI\n"); ++ return -ENOMEM; ++ } ++ ++ for (i = 0; i < PERIF_MMBI_INST_NUM; ++i) { ++ mmbi = &perif->mmbi.inst[i]; ++ ++ init_waitqueue_head(&mmbi->wq); ++ ++ mmbi->perif = perif; ++ mmbi->host_rwp_update = false; ++ ++ mmbi->b2h_virt = 
perif->mmbi.virt + ((SZ_4K << perif->mmbi.inst_size) * i); ++ mmbi->b2h_addr = perif->mmbi.taddr + ((SZ_4K << perif->mmbi.inst_size) * i); ++ mmbi->b2h_mdev.parent = dev; ++ mmbi->b2h_mdev.minor = MISC_DYNAMIC_MINOR; ++ mmbi->b2h_mdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s-mmbi-b2h%d", DEVICE_NAME, i); ++ mmbi->b2h_mdev.fops = &ast2600_espi_mmbi_b2h_fops; ++ rc = misc_register(&mmbi->b2h_mdev); ++ if (rc) { ++ dev_err(dev, "cannot register device %s\n", mmbi->b2h_mdev.name); ++ return rc; ++ } ++ ++ mmbi->h2b_virt = perif->mmbi.virt + ((SZ_4K << perif->mmbi.inst_size) * (i + PERIF_MMBI_INST_NUM)); ++ mmbi->h2b_addr = perif->mmbi.taddr + ((SZ_4K << perif->mmbi.inst_size) * (i + PERIF_MMBI_INST_NUM)); ++ mmbi->h2b_mdev.parent = dev; ++ mmbi->h2b_mdev.minor = MISC_DYNAMIC_MINOR; ++ mmbi->h2b_mdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s-mmbi-h2b%d", DEVICE_NAME, i); ++ mmbi->h2b_mdev.fops = &ast2600_espi_mmbi_h2b_fops; ++ rc = misc_register(&mmbi->h2b_mdev); ++ if (rc) { ++ dev_err(dev, "cannot register device %s\n", mmbi->h2b_mdev.name); ++ return rc; ++ } ++ } ++ } ++ ++ perif->mcyc.enable = of_property_read_bool(dev->of_node, "perif-mcyc-enable"); ++ if (perif->mcyc.enable) { ++ if (perif->mmbi.enable) { ++ dev_err(dev, "cannot enable memory cycle, occupied by MMBI\n"); ++ return -EPERM; ++ } ++ ++ rc = of_property_read_u32(dev->of_node, "perif-mcyc-src-addr", &perif->mcyc.saddr); ++ if (rc || !IS_ALIGNED(perif->mcyc.saddr, PERIF_MCYC_ALIGN)) { ++ dev_err(dev, "cannot get 64KB-aligned memory cycle host address\n"); ++ return -ENODEV; ++ } ++ ++ rc = of_property_read_u32(dev->of_node, "perif-mcyc-size", &perif->mcyc.size); ++ if (rc || !IS_ALIGNED(perif->mcyc.size, PERIF_MCYC_ALIGN)) { ++ dev_err(dev, "cannot get 64KB-aligned memory cycle size\n"); ++ return -EINVAL; ++ } ++ ++ perif->mcyc.virt = dmam_alloc_coherent(dev, perif->mcyc.size, ++ &perif->mcyc.taddr, GFP_KERNEL); ++ if (!perif->mcyc.virt) { ++ dev_err(dev, "cannot allocate memory cycle\n"); ++ return -ENOMEM; ++ } ++ } ++ ++ perif->dma.enable = of_property_read_bool(dev->of_node, "perif-dma-mode"); ++ if (perif->dma.enable) { ++ perif->dma.pc_tx_virt = dmam_alloc_coherent(dev, PAGE_SIZE, ++ &perif->dma.pc_tx_addr, GFP_KERNEL); ++ if (!perif->dma.pc_tx_virt) { ++ dev_err(dev, "cannot allocate posted TX DMA buffer\n"); ++ return -ENOMEM; ++ } ++ ++ perif->dma.pc_rx_virt = dmam_alloc_coherent(dev, PAGE_SIZE, ++ &perif->dma.pc_rx_addr, GFP_KERNEL); ++ if (!perif->dma.pc_rx_virt) { ++ dev_err(dev, "cannot allocate posted RX DMA buffer\n"); ++ return -ENOMEM; ++ } ++ ++ perif->dma.np_tx_virt = dmam_alloc_coherent(dev, PAGE_SIZE, ++ &perif->dma.np_tx_addr, GFP_KERNEL); ++ if (!perif->dma.np_tx_virt) { ++ dev_err(dev, "cannot allocate non-posted TX DMA buffer\n"); ++ return -ENOMEM; ++ } ++ } ++ ++ perif->mdev.parent = dev; ++ perif->mdev.minor = MISC_DYNAMIC_MINOR; ++ perif->mdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s-peripheral", DEVICE_NAME); ++ perif->mdev.fops = &ast2600_espi_perif_fops; ++ rc = misc_register(&perif->mdev); ++ if (rc) { ++ dev_err(dev, "cannot register device %s\n", perif->mdev.name); ++ return rc; ++ } ++ ++ ast2600_espi_perif_reset(espi); ++ ++ if (perif->mmbi.enable) { ++ rc = devm_request_irq(dev, espi->perif.mmbi.irq, ++ ast2600_espi_perif_mmbi_isr, 0, dev_name(dev), espi); ++ if (rc) { ++ dev_err(dev, "cannot request MMBI IRQ\n"); ++ return rc; ++ } ++ } ++ ++ return 0; ++} ++ ++static int ast2600_espi_perif_remove(struct ast2600_espi *espi) ++{ ++ struct ast2600_espi_perif_mmbi *mmbi; ++ struct 
ast2600_espi_perif *perif; ++ struct device *dev; ++ uint32_t reg; ++ int i; ++ ++ dev = espi->dev; ++ ++ perif = &espi->perif; ++ ++ writel(ESPI_INT_EN_PERIF, espi->regs + ESPI_INT_EN_CLR); ++ ++ reg = readl(espi->regs + ESPI_CTRL2); ++ reg |= (ESPI_CTRL2_MCYC_RD_DIS | ESPI_CTRL2_MCYC_WR_DIS); ++ writel(reg, espi->regs + ESPI_CTRL2); ++ ++ reg = readl(espi->regs + ESPI_CTRL); ++ reg &= ~(ESPI_CTRL_PERIF_NP_TX_DMA_EN ++ | ESPI_CTRL_PERIF_PC_TX_DMA_EN ++ | ESPI_CTRL_PERIF_PC_RX_DMA_EN ++ | ESPI_CTRL_PERIF_SW_RDY); ++ writel(reg, espi->regs + ESPI_CTRL); ++ ++ if (perif->mmbi.enable) { ++ reg = readl(espi->regs + ESPI_MMBI_CTRL); ++ reg &= ~ESPI_MMBI_CTRL_EN; ++ writel(reg, espi->regs + ESPI_MMBI_CTRL); ++ ++ for (i = 0; i < PERIF_MMBI_INST_NUM; ++i) { ++ mmbi = &perif->mmbi.inst[i]; ++ misc_deregister(&mmbi->b2h_mdev); ++ misc_deregister(&mmbi->h2b_mdev); ++ } ++ ++ dmam_free_coherent(dev, perif->mmbi.size, perif->mmbi.virt, ++ perif->mmbi.taddr); ++ } ++ ++ if (perif->mcyc.enable) ++ dmam_free_coherent(dev, perif->mcyc.size, perif->mcyc.virt, ++ perif->mcyc.taddr); ++ ++ if (perif->dma.enable) { ++ dmam_free_coherent(dev, PAGE_SIZE, perif->dma.np_tx_virt, ++ perif->dma.np_tx_addr); ++ dmam_free_coherent(dev, PAGE_SIZE, perif->dma.pc_tx_virt, ++ perif->dma.pc_tx_addr); ++ dmam_free_coherent(dev, PAGE_SIZE, perif->dma.pc_rx_virt, ++ perif->dma.pc_rx_addr); ++ } ++ ++ mutex_destroy(&perif->np_tx_mtx); ++ mutex_destroy(&perif->pc_tx_mtx); ++ mutex_destroy(&perif->pc_rx_mtx); ++ ++ misc_deregister(&perif->mdev); ++ ++ return 0; ++} ++ ++/* virtual wire channel (CH1) */ ++static long ast2600_espi_vw_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) ++{ ++ struct ast2600_espi_vw *vw; ++ struct ast2600_espi *espi; ++ uint32_t gpio, hw_mode; ++ ++ vw = container_of(fp->private_data, struct ast2600_espi_vw, mdev); ++ espi = container_of(vw, struct ast2600_espi, vw); ++ gpio = vw->gpio.val; ++ hw_mode = vw->gpio.hw_mode; ++ ++ if (hw_mode) { ++ dev_err(espi->dev, "HW mode: vGPIO reflect on physical GPIO. 
Get state from GPIO driver.\n"); ++ return -EFAULT; ++ } ++ ++ switch (cmd) { ++ case ASPEED_ESPI_VW_GET_GPIO_VAL: ++ if (put_user(gpio, (uint32_t __user *)arg)) { ++ dev_err(espi->dev, "failed to get vGPIO value\n"); ++ return -EFAULT; ++ } ++ ++ dev_info(espi->dev, "Get vGPIO value: 0x%x\n", gpio); ++ break; ++ ++ case ASPEED_ESPI_VW_PUT_GPIO_VAL: ++ if (get_user(gpio, (uint32_t __user *)arg)) { ++ dev_err(espi->dev, "failed to put vGPIO value\n"); ++ return -EFAULT; ++ } ++ ++ dev_info(espi->dev, "Put vGPIO value: 0x%x\n", gpio); ++ writel(gpio, espi->regs + ESPI_VW_GPIO_VAL); ++ break; ++ ++ default: ++ return -EINVAL; ++ }; ++ ++ return 0; ++} ++ ++static const struct file_operations ast2600_espi_vw_fops = { ++ .owner = THIS_MODULE, ++ .unlocked_ioctl = ast2600_espi_vw_ioctl, ++}; ++ ++static void ast2600_espi_vw_isr(struct ast2600_espi *espi) ++{ ++ struct ast2600_espi_vw *vw; ++ uint32_t sts; ++ ++ vw = &espi->vw; ++ ++ sts = readl(espi->regs + ESPI_INT_STS); ++ ++ if (sts & ESPI_INT_STS_VW_GPIO) { ++ vw->gpio.val = readl(espi->regs + ESPI_VW_GPIO_VAL); ++ writel(ESPI_INT_STS_VW_GPIO, espi->regs + ESPI_INT_STS); ++ } else if (sts & ESPI_INT_STS_VW_SYSEVT) { ++ /* Handle system event */ ++ writel(ESPI_INT_STS_VW_SYSEVT, espi->regs + ESPI_INT_STS); ++ } else if (sts & (ESPI_INT_STS_VW_SYSEVT1)) { ++ /* Handle system event1 */ ++ writel(ESPI_INT_STS_VW_SYSEVT1, espi->regs + ESPI_INT_STS); ++ } ++} ++ ++static void ast2600_espi_vw_reset(struct ast2600_espi *espi) ++{ ++ uint32_t reg; ++ struct ast2600_espi_vw *vw = &espi->vw; ++ ++ writel(ESPI_INT_EN_VW, espi->regs + ESPI_INT_EN_CLR); ++ writel(ESPI_INT_STS_VW, espi->regs + ESPI_INT_STS); ++ ++ writel(vw->gpio.dir, espi->regs + ESPI_VW_GPIO_DIR); ++ ++ vw->gpio.val = readl(espi->regs + ESPI_VW_GPIO_VAL); ++ ++ reg = readl(espi->regs + ESPI_CTRL2) & ~(ESPI_CTRL2_VW_TX_SORT); ++ writel(reg, espi->regs + ESPI_CTRL2); ++ ++ writel(ESPI_INT_EN_VW_GPIO, espi->regs + ESPI_INT_EN); ++ ++ reg = readl(espi->regs + ESPI_CTRL) ++ | ((vw->gpio.hw_mode) ? 
0 : ESPI_CTRL_VW_GPIO_SW) ++ | ESPI_CTRL_VW_SW_RDY; ++ writel(reg, espi->regs + ESPI_CTRL); ++ ++ writel(0x0, espi->regs + ESPI_VW_SYSEVT_INT_T0); ++ writel(0x0, espi->regs + ESPI_VW_SYSEVT_INT_T1); ++ ++ reg = readl(espi->regs + ESPI_INT_EN); ++ reg |= ESPI_INT_EN_RST_DEASSERT; ++ writel(reg, espi->regs + ESPI_INT_EN); ++ ++ writel(0xffffffff, espi->regs + ESPI_VW_SYSEVT_INT_EN); ++ writel(0x1, espi->regs + ESPI_VW_SYSEVT1_INT_EN); ++ writel(0x1, espi->regs + ESPI_VW_SYSEVT1_INT_T0); ++} ++ ++static int ast2600_espi_vw_probe(struct ast2600_espi *espi) ++{ ++ int rc; ++ struct device *dev = espi->dev; ++ struct ast2600_espi_vw *vw = &espi->vw; ++ ++ vw->gpio.hw_mode = of_property_read_bool(dev->of_node, "vw-gpio-hw-mode"); ++ of_property_read_u32(dev->of_node, "vw-gpio-direction", &vw->gpio.dir); ++ ++ vw->mdev.parent = dev; ++ vw->mdev.minor = MISC_DYNAMIC_MINOR; ++ vw->mdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s-vw", DEVICE_NAME); ++ vw->mdev.fops = &ast2600_espi_vw_fops; ++ rc = misc_register(&vw->mdev); ++ if (rc) { ++ dev_err(dev, "cannot register device %s\n", vw->mdev.name); ++ return rc; ++ } ++ ++ ast2600_espi_vw_reset(espi); ++ ++ return 0; ++} ++ ++static int ast2600_espi_vw_remove(struct ast2600_espi *espi) ++{ ++ struct ast2600_espi_vw *vw; ++ ++ vw = &espi->vw; ++ ++ writel(ESPI_INT_EN_VW, espi->regs + ESPI_INT_EN_CLR); ++ ++ misc_deregister(&vw->mdev); ++ ++ return 0; ++} ++ ++/* out-of-band channel (CH2) */ ++static long ast2600_espi_oob_dma_get_rx(struct file *fp, ++ struct ast2600_espi_oob *oob, ++ struct aspeed_espi_ioc *ioc) ++{ ++ struct ast2600_espi_oob_dma_rx_desc *d; ++ struct ast2600_espi *espi; ++ struct espi_comm_hdr *hdr; ++ uint32_t wptr, pkt_len; ++ unsigned long flags; ++ uint8_t *pkt; ++ int rc; ++ ++ espi = container_of(oob, struct ast2600_espi, oob); ++ ++ wptr = FIELD_PREP(ESPI_OOB_RX_DESC_WPTR_WP, readl(espi->regs + ESPI_OOB_RX_DESC_WPTR)); ++ ++ d = &oob->dma.rxd_virt[wptr]; ++ ++ if (!d->dirty) ++ return -EFAULT; ++ ++ pkt_len = ((d->len) ? 
d->len : ESPI_MAX_PLD_LEN) + sizeof(struct espi_comm_hdr); ++ ++ if (ioc->pkt_len < pkt_len) ++ return -EINVAL; ++ ++ pkt = vmalloc(pkt_len); ++ if (!pkt) ++ return -ENOMEM; ++ ++ hdr = (struct espi_comm_hdr *)pkt; ++ hdr->cyc = d->cyc; ++ hdr->tag = d->tag; ++ hdr->len_h = d->len >> 8; ++ hdr->len_l = d->len & 0xff; ++ memcpy(hdr + 1, oob->dma.rx_virt + (PAGE_SIZE * wptr), pkt_len - sizeof(*hdr)); ++ ++ if (copy_to_user((void __user *)ioc->pkt, pkt, pkt_len)) { ++ rc = -EFAULT; ++ goto free_n_out; ++ } ++ ++ spin_lock_irqsave(&oob->lock, flags); ++ ++ /* make current descriptor available again */ ++ d->dirty = 0; ++ ++ wptr = (wptr + 1) % OOB_DMA_DESC_NUM; ++ writel(wptr | ESPI_OOB_RX_DESC_WPTR_RECV_EN, espi->regs + ESPI_OOB_RX_DESC_WPTR); ++ ++ /* set ready flag base on the next RX descriptor */ ++ oob->rx_ready = oob->dma.rxd_virt[wptr].dirty; ++ ++ spin_unlock_irqrestore(&oob->lock, flags); ++ ++ rc = 0; ++ ++free_n_out: ++ vfree(pkt); ++ ++ return rc; ++} ++ ++static long ast2600_espi_oob_get_rx(struct file *fp, ++ struct ast2600_espi_oob *oob, ++ struct aspeed_espi_ioc *ioc) ++{ ++ uint32_t reg, cyc, tag, len; ++ struct ast2600_espi *espi; ++ struct espi_comm_hdr *hdr; ++ unsigned long flags; ++ uint32_t pkt_len; ++ uint8_t *pkt; ++ int i, rc; ++ ++ espi = container_of(oob, struct ast2600_espi, oob); ++ ++ if (fp->f_flags & O_NONBLOCK) { ++ if (!mutex_trylock(&oob->rx_mtx)) ++ return -EAGAIN; ++ ++ if (!oob->rx_ready) { ++ rc = -ENODATA; ++ goto unlock_mtx_n_out; ++ } ++ } else { ++ mutex_lock(&oob->rx_mtx); ++ ++ if (!oob->rx_ready) { ++ rc = wait_event_interruptible(oob->wq, oob->rx_ready); ++ if (rc == -ERESTARTSYS) { ++ rc = -EINTR; ++ goto unlock_mtx_n_out; ++ } ++ } ++ } ++ ++ if (oob->dma.enable) { ++ rc = ast2600_espi_oob_dma_get_rx(fp, oob, ioc); ++ goto unlock_mtx_n_out; ++ } ++ ++ /* ++ * common header (i.e. cycle type, tag, and length) ++ * part is written to HW registers ++ */ ++ reg = readl(espi->regs + ESPI_OOB_RX_CTRL); ++ cyc = FIELD_GET(ESPI_OOB_RX_CTRL_CYC, reg); ++ tag = FIELD_GET(ESPI_OOB_RX_CTRL_TAG, reg); ++ len = FIELD_GET(ESPI_OOB_RX_CTRL_LEN, reg); ++ ++ /* ++ * calculate the length of the rest part of the ++ * eSPI packet to be read from HW and copied to ++ * user space. ++ */ ++ pkt_len = ((len) ? 
len : ESPI_MAX_PLD_LEN) + sizeof(struct espi_comm_hdr); ++ ++ if (ioc->pkt_len < pkt_len) { ++ rc = -EINVAL; ++ goto unlock_mtx_n_out; ++ } ++ ++ pkt = vmalloc(pkt_len); ++ if (!pkt) { ++ rc = -ENOMEM; ++ goto unlock_mtx_n_out; ++ } ++ ++ hdr = (struct espi_comm_hdr *)pkt; ++ hdr->cyc = cyc; ++ hdr->tag = tag; ++ hdr->len_h = len >> 8; ++ hdr->len_l = len & 0xff; ++ ++ for (i = sizeof(*hdr); i < pkt_len; ++i) ++ pkt[i] = readl(espi->regs + ESPI_OOB_RX_DATA) & 0xff; ++ ++ if (copy_to_user((void __user *)ioc->pkt, pkt, pkt_len)) { ++ rc = -EFAULT; ++ goto free_n_out; ++ } ++ ++ spin_lock_irqsave(&oob->lock, flags); ++ ++ writel(ESPI_OOB_RX_CTRL_SERV_PEND, espi->regs + ESPI_OOB_RX_CTRL); ++ oob->rx_ready = 0; ++ ++ spin_unlock_irqrestore(&oob->lock, flags); ++ ++ rc = 0; ++ ++free_n_out: ++ vfree(pkt); ++ ++unlock_mtx_n_out: ++ mutex_unlock(&oob->rx_mtx); ++ ++ return rc; ++} ++ ++static long ast2600_espi_oob_dma_put_tx(struct file *fp, ++ struct ast2600_espi_oob *oob, ++ struct aspeed_espi_ioc *ioc) ++{ ++ struct ast2600_espi_oob_dma_tx_desc *d; ++ struct ast2600_espi *espi; ++ struct espi_comm_hdr *hdr; ++ uint32_t rptr, wptr; ++ uint8_t *pkt; ++ int rc; ++ ++ espi = container_of(oob, struct ast2600_espi, oob); ++ ++ pkt = vzalloc(ioc->pkt_len); ++ if (!pkt) ++ return -ENOMEM; ++ ++ hdr = (struct espi_comm_hdr *)pkt; ++ ++ if (copy_from_user(pkt, (void __user *)ioc->pkt, ioc->pkt_len)) { ++ rc = -EFAULT; ++ goto free_n_out; ++ } ++ ++ /* kick HW to update descriptor read/write pointer */ ++ writel(ESPI_OOB_TX_DESC_RPTR_UPDATE, espi->regs + ESPI_OOB_TX_DESC_RPTR); ++ ++ rptr = readl(espi->regs + ESPI_OOB_TX_DESC_RPTR); ++ wptr = readl(espi->regs + ESPI_OOB_TX_DESC_WPTR); ++ ++ if (((wptr + 1) % OOB_DMA_DESC_NUM) == rptr) { ++ rc = -EBUSY; ++ goto free_n_out; ++ } ++ ++ d = &oob->dma.txd_virt[wptr]; ++ d->cyc = hdr->cyc; ++ d->tag = hdr->tag; ++ d->len = (hdr->len_h << 8) | (hdr->len_l & 0xff); ++ d->msg_type = OOB_DMA_DESC_CUSTOM; ++ ++ memcpy(oob->dma.tx_virt + (PAGE_SIZE * wptr), hdr + 1, ioc->pkt_len - sizeof(*hdr)); ++ ++ dma_wmb(); ++ ++ wptr = (wptr + 1) % OOB_DMA_DESC_NUM; ++ writel(wptr | ESPI_OOB_TX_DESC_WPTR_SEND_EN, espi->regs + ESPI_OOB_TX_DESC_WPTR); ++ ++ rc = 0; ++ ++free_n_out: ++ vfree(pkt); ++ ++ return rc; ++} ++ ++static long ast2600_espi_oob_put_tx(struct file *fp, ++ struct ast2600_espi_oob *oob, ++ struct aspeed_espi_ioc *ioc) ++{ ++ uint32_t reg, cyc, tag, len; ++ struct ast2600_espi *espi; ++ struct espi_comm_hdr *hdr; ++ uint8_t *pkt; ++ int i, rc; ++ ++ espi = container_of(oob, struct ast2600_espi, oob); ++ ++ if (!mutex_trylock(&oob->tx_mtx)) ++ return -EAGAIN; ++ ++ if (oob->dma.enable) { ++ rc = ast2600_espi_oob_dma_put_tx(fp, oob, ioc); ++ goto unlock_mtx_n_out; ++ } ++ ++ reg = readl(espi->regs + ESPI_OOB_TX_CTRL); ++ if (reg & ESPI_OOB_TX_CTRL_TRIG_PEND) { ++ rc = -EBUSY; ++ goto unlock_mtx_n_out; ++ } ++ ++ if (ioc->pkt_len > ESPI_MAX_PKT_LEN) { ++ rc = -EINVAL; ++ goto unlock_mtx_n_out; ++ } ++ ++ pkt = vmalloc(ioc->pkt_len); ++ if (!pkt) { ++ rc = -ENOMEM; ++ goto unlock_mtx_n_out; ++ } ++ ++ hdr = (struct espi_comm_hdr *)pkt; ++ ++ if (copy_from_user(pkt, (void __user *)ioc->pkt, ioc->pkt_len)) { ++ rc = -EFAULT; ++ goto free_n_out; ++ } ++ ++ /* ++ * common header (i.e. 
cycle type, tag, and length) ++ * part is written to HW registers ++ */ ++ for (i = sizeof(*hdr); i < ioc->pkt_len; ++i) ++ writel(pkt[i], espi->regs + ESPI_OOB_TX_DATA); ++ ++ cyc = hdr->cyc; ++ tag = hdr->tag; ++ len = (hdr->len_h << 8) | (hdr->len_l & 0xff); ++ ++ reg = FIELD_PREP(ESPI_OOB_TX_CTRL_CYC, cyc) ++ | FIELD_PREP(ESPI_OOB_TX_CTRL_TAG, tag) ++ | FIELD_PREP(ESPI_OOB_TX_CTRL_LEN, len) ++ | ESPI_OOB_TX_CTRL_TRIG_PEND; ++ writel(reg, espi->regs + ESPI_OOB_TX_CTRL); ++ ++ rc = 0; ++ ++free_n_out: ++ vfree(pkt); ++ ++unlock_mtx_n_out: ++ mutex_unlock(&oob->tx_mtx); ++ ++ return rc; ++} ++ ++static long ast2600_espi_oob_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) ++{ ++ struct ast2600_espi_oob *oob; ++ struct aspeed_espi_ioc ioc; ++ ++ oob = container_of(fp->private_data, struct ast2600_espi_oob, mdev); ++ ++ if (copy_from_user(&ioc, (void __user *)arg, sizeof(ioc))) ++ return -EFAULT; ++ ++ if (ioc.pkt_len > ESPI_MAX_PKT_LEN) ++ return -EINVAL; ++ ++ switch (cmd) { ++ case ASPEED_ESPI_OOB_GET_RX: ++ return ast2600_espi_oob_get_rx(fp, oob, &ioc); ++ case ASPEED_ESPI_OOB_PUT_TX: ++ return ast2600_espi_oob_put_tx(fp, oob, &ioc); ++ }; ++ ++ return -EINVAL; ++} ++ ++static const struct file_operations ast2600_espi_oob_fops = { ++ .owner = THIS_MODULE, ++ .unlocked_ioctl = ast2600_espi_oob_ioctl, ++}; ++ ++static void ast2600_espi_oob_isr(struct ast2600_espi *espi) ++{ ++ struct ast2600_espi_oob *oob; ++ unsigned long flags; ++ uint32_t sts; ++ ++ oob = &espi->oob; ++ ++ sts = readl(espi->regs + ESPI_INT_STS); ++ ++ if (sts & ESPI_INT_STS_OOB_RX_CMPLT) { ++ writel(ESPI_INT_STS_OOB_RX_CMPLT, espi->regs + ESPI_INT_STS); ++ ++ spin_lock_irqsave(&oob->lock, flags); ++ oob->rx_ready = true; ++ spin_unlock_irqrestore(&oob->lock, flags); ++ ++ wake_up_interruptible(&oob->wq); ++ } ++} ++ ++static void ast2600_espi_oob_reset(struct ast2600_espi *espi) ++{ ++ struct ast2600_espi_oob *oob; ++ dma_addr_t tx_addr, rx_addr; ++ uint32_t reg; ++ int i; ++ ++ writel(ESPI_INT_EN_OOB, espi->regs + ESPI_INT_EN_CLR); ++ writel(ESPI_INT_STS_OOB, espi->regs + ESPI_INT_STS); ++ ++ reg = readl(espi->regs + ESPI_CTRL); ++ reg &= ~(ESPI_CTRL_OOB_TX_SW_RST ++ | ESPI_CTRL_OOB_RX_SW_RST ++ | ESPI_CTRL_OOB_TX_DMA_EN ++ | ESPI_CTRL_OOB_RX_DMA_EN ++ | ESPI_CTRL_OOB_SW_RDY); ++ writel(reg, espi->regs + ESPI_CTRL); ++ ++ udelay(1); ++ ++ reg |= (ESPI_CTRL_OOB_TX_SW_RST | ESPI_CTRL_OOB_RX_SW_RST); ++ writel(reg, espi->regs + ESPI_CTRL); ++ ++ oob = &espi->oob; ++ ++ if (oob->dma.enable) { ++ tx_addr = oob->dma.tx_addr; ++ rx_addr = oob->dma.rx_addr; ++ ++ for (i = 0; i < OOB_DMA_DESC_NUM; ++i) { ++ oob->dma.txd_virt[i].data_addr = tx_addr; ++ tx_addr += PAGE_SIZE; ++ ++ oob->dma.rxd_virt[i].data_addr = rx_addr; ++ oob->dma.rxd_virt[i].dirty = 0; ++ rx_addr += PAGE_SIZE; ++ } ++ ++ writel(oob->dma.txd_addr, espi->regs + ESPI_OOB_TX_DMA); ++ writel(OOB_DMA_RPTR_KEY, espi->regs + ESPI_OOB_TX_DESC_RPTR); ++ writel(0x0, espi->regs + ESPI_OOB_TX_DESC_WPTR); ++ writel(OOB_DMA_DESC_NUM, espi->regs + ESPI_OOB_TX_DESC_NUM); ++ ++ writel(oob->dma.rxd_addr, espi->regs + ESPI_OOB_RX_DMA); ++ writel(OOB_DMA_RPTR_KEY, espi->regs + ESPI_OOB_RX_DESC_RPTR); ++ writel(0x0, espi->regs + ESPI_OOB_RX_DESC_WPTR); ++ writel(OOB_DMA_DESC_NUM, espi->regs + ESPI_OOB_RX_DESC_NUM); ++ ++ reg = readl(espi->regs + ESPI_CTRL) ++ | ESPI_CTRL_OOB_TX_DMA_EN ++ | ESPI_CTRL_OOB_RX_DMA_EN; ++ writel(reg, espi->regs + ESPI_CTRL); ++ ++ /* activate RX DMA to make OOB_FREE */ ++ writel(ESPI_OOB_RX_DESC_WPTR_RECV_EN, espi->regs + 
ESPI_OOB_RX_DESC_WPTR); ++ } ++ ++ writel(ESPI_INT_EN_OOB_RX_CMPLT, espi->regs + ESPI_INT_EN); ++ ++ reg = readl(espi->regs + ESPI_CTRL) | ESPI_CTRL_OOB_SW_RDY; ++ writel(reg, espi->regs + ESPI_CTRL); ++} ++ ++static int ast2600_espi_oob_probe(struct ast2600_espi *espi) ++{ ++ struct ast2600_espi_oob *oob; ++ struct device *dev; ++ int rc; ++ ++ dev = espi->dev; ++ ++ oob = &espi->oob; ++ ++ init_waitqueue_head(&oob->wq); ++ ++ spin_lock_init(&oob->lock); ++ ++ mutex_init(&oob->tx_mtx); ++ mutex_init(&oob->rx_mtx); ++ ++ oob->dma.enable = of_property_read_bool(dev->of_node, "oob-dma-mode"); ++ if (oob->dma.enable) { ++ oob->dma.txd_virt = dmam_alloc_coherent(dev, sizeof(*oob->dma.txd_virt) * OOB_DMA_DESC_NUM, &oob->dma.txd_addr, GFP_KERNEL); ++ if (!oob->dma.txd_virt) { ++ dev_err(dev, "cannot allocate DMA TX descriptor\n"); ++ return -ENOMEM; ++ } ++ oob->dma.tx_virt = dmam_alloc_coherent(dev, PAGE_SIZE * OOB_DMA_DESC_NUM, &oob->dma.tx_addr, GFP_KERNEL); ++ if (!oob->dma.tx_virt) { ++ dev_err(dev, "cannot allocate DMA TX buffer\n"); ++ return -ENOMEM; ++ } ++ ++ oob->dma.rxd_virt = dmam_alloc_coherent(dev, sizeof(*oob->dma.rxd_virt) * OOB_DMA_DESC_NUM, &oob->dma.rxd_addr, GFP_KERNEL); ++ if (!oob->dma.rxd_virt) { ++ dev_err(dev, "cannot allocate DMA RX descriptor\n"); ++ return -ENOMEM; ++ } ++ ++ oob->dma.rx_virt = dmam_alloc_coherent(dev, PAGE_SIZE * OOB_DMA_DESC_NUM, &oob->dma.rx_addr, GFP_KERNEL); ++ if (!oob->dma.rx_virt) { ++ dev_err(dev, "cannot allocate DMA TX buffer\n"); ++ return -ENOMEM; ++ } ++ } ++ ++ oob->mdev.parent = dev; ++ oob->mdev.minor = MISC_DYNAMIC_MINOR; ++ oob->mdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s-oob", DEVICE_NAME); ++ oob->mdev.fops = &ast2600_espi_oob_fops; ++ rc = misc_register(&oob->mdev); ++ if (rc) { ++ dev_err(dev, "cannot register device %s\n", oob->mdev.name); ++ return rc; ++ } ++ ++ ast2600_espi_oob_reset(espi); ++ ++ return 0; ++} ++ ++static int ast2600_espi_oob_remove(struct ast2600_espi *espi) ++{ ++ struct ast2600_espi_oob *oob; ++ struct device *dev; ++ uint32_t reg; ++ ++ dev = espi->dev; ++ ++ oob = &espi->oob; ++ ++ writel(ESPI_INT_EN_OOB, espi->regs + ESPI_INT_EN_CLR); ++ ++ reg = readl(espi->regs + ESPI_CTRL); ++ reg &= ~(ESPI_CTRL_OOB_TX_DMA_EN ++ | ESPI_CTRL_OOB_RX_DMA_EN ++ | ESPI_CTRL_OOB_SW_RDY); ++ writel(reg, espi->regs + ESPI_CTRL); ++ ++ if (oob->dma.enable) { ++ dmam_free_coherent(dev, sizeof(*oob->dma.txd_virt) * OOB_DMA_DESC_NUM, ++ oob->dma.txd_virt, oob->dma.txd_addr); ++ dmam_free_coherent(dev, PAGE_SIZE * OOB_DMA_DESC_NUM, ++ oob->dma.tx_virt, oob->dma.tx_addr); ++ dmam_free_coherent(dev, sizeof(*oob->dma.rxd_virt) * OOB_DMA_DESC_NUM, ++ oob->dma.rxd_virt, oob->dma.rxd_addr); ++ dmam_free_coherent(dev, PAGE_SIZE * OOB_DMA_DESC_NUM, ++ oob->dma.rx_virt, oob->dma.rx_addr); ++ } ++ ++ mutex_destroy(&oob->tx_mtx); ++ mutex_destroy(&oob->rx_mtx); ++ ++ misc_deregister(&oob->mdev); ++ ++ return 0; ++} ++ ++/* flash channel (CH3) */ ++static long ast2600_espi_flash_get_rx(struct file *fp, ++ struct ast2600_espi_flash *flash, ++ struct aspeed_espi_ioc *ioc) ++{ ++ uint32_t reg, cyc, tag, len; ++ struct ast2600_espi *espi; ++ struct espi_comm_hdr *hdr; ++ unsigned long flags; ++ uint32_t pkt_len; ++ uint8_t *pkt; ++ int i, rc; ++ ++ rc = 0; ++ ++ espi = container_of(flash, struct ast2600_espi, flash); ++ ++ if (fp->f_flags & O_NONBLOCK) { ++ if (!mutex_trylock(&flash->rx_mtx)) ++ return -EAGAIN; ++ ++ if (!flash->rx_ready) { ++ rc = -ENODATA; ++ goto unlock_mtx_n_out; ++ } ++ } else { ++ mutex_lock(&flash->rx_mtx); ++ ++ if 
(!flash->rx_ready) { ++ rc = wait_event_interruptible(flash->wq, flash->rx_ready); ++ if (rc == -ERESTARTSYS) { ++ rc = -EINTR; ++ goto unlock_mtx_n_out; ++ } ++ } ++ } ++ ++ /* ++ * common header (i.e. cycle type, tag, and length) ++ * part is written to HW registers ++ */ ++ reg = readl(espi->regs + ESPI_FLASH_RX_CTRL); ++ cyc = FIELD_GET(ESPI_FLASH_RX_CTRL_CYC, reg); ++ tag = FIELD_GET(ESPI_FLASH_RX_CTRL_TAG, reg); ++ len = FIELD_GET(ESPI_FLASH_RX_CTRL_LEN, reg); ++ ++ /* ++ * calculate the length of the rest part of the ++ * eSPI packet to be read from HW and copied to ++ * user space. ++ */ ++ switch (cyc) { ++ case ESPI_FLASH_WRITE: ++ pkt_len = ((len) ? len : ESPI_MAX_PLD_LEN) + ++ sizeof(struct espi_flash_rwe); ++ break; ++ case ESPI_FLASH_READ: ++ case ESPI_FLASH_ERASE: ++ pkt_len = sizeof(struct espi_flash_rwe); ++ break; ++ case ESPI_FLASH_SUC_CMPLT_D_MIDDLE: ++ case ESPI_FLASH_SUC_CMPLT_D_FIRST: ++ case ESPI_FLASH_SUC_CMPLT_D_LAST: ++ case ESPI_FLASH_SUC_CMPLT_D_ONLY: ++ pkt_len = ((len) ? len : ESPI_MAX_PLD_LEN) + ++ sizeof(struct espi_flash_cmplt); ++ break; ++ case ESPI_FLASH_SUC_CMPLT: ++ case ESPI_FLASH_UNSUC_CMPLT: ++ pkt_len = sizeof(struct espi_flash_cmplt); ++ break; ++ default: ++ rc = -EFAULT; ++ goto unlock_mtx_n_out; ++ } ++ ++ if (ioc->pkt_len < pkt_len) { ++ rc = -EINVAL; ++ goto unlock_mtx_n_out; ++ } ++ ++ pkt = vmalloc(pkt_len); ++ if (!pkt) { ++ rc = -ENOMEM; ++ goto unlock_mtx_n_out; ++ } ++ ++ hdr = (struct espi_comm_hdr *)pkt; ++ hdr->cyc = cyc; ++ hdr->tag = tag; ++ hdr->len_h = len >> 8; ++ hdr->len_l = len & 0xff; ++ ++ if (flash->dma.enable) { ++ memcpy(hdr + 1, flash->dma.rx_virt, pkt_len - sizeof(*hdr)); ++ } else { ++ for (i = sizeof(*hdr); i < pkt_len; ++i) ++ pkt[i] = readl(espi->regs + ESPI_FLASH_RX_DATA) & 0xff; ++ } ++ ++ if (copy_to_user((void __user *)ioc->pkt, pkt, pkt_len)) { ++ rc = -EFAULT; ++ goto free_n_out; ++ } ++ ++ spin_lock_irqsave(&flash->lock, flags); ++ ++ writel(ESPI_FLASH_RX_CTRL_SERV_PEND, espi->regs + ESPI_FLASH_RX_CTRL); ++ flash->rx_ready = 0; ++ ++ spin_unlock_irqrestore(&flash->lock, flags); ++ ++ rc = 0; ++ ++free_n_out: ++ vfree(pkt); ++ ++unlock_mtx_n_out: ++ mutex_unlock(&flash->rx_mtx); ++ ++ return rc; ++} ++ ++static long ast2600_espi_flash_put_tx(struct file *fp, ++ struct ast2600_espi_flash *flash, ++ struct aspeed_espi_ioc *ioc) ++{ ++ uint32_t reg, cyc, tag, len; ++ struct ast2600_espi *espi; ++ struct espi_comm_hdr *hdr; ++ uint8_t *pkt; ++ int i, rc; ++ ++ espi = container_of(flash, struct ast2600_espi, flash); ++ ++ if (!mutex_trylock(&flash->tx_mtx)) ++ return -EAGAIN; ++ ++ reg = readl(espi->regs + ESPI_FLASH_TX_CTRL); ++ if (reg & ESPI_FLASH_TX_CTRL_TRIG_PEND) { ++ rc = -EBUSY; ++ goto unlock_mtx_n_out; ++ } ++ ++ pkt = vmalloc(ioc->pkt_len); ++ if (!pkt) { ++ rc = -ENOMEM; ++ goto unlock_mtx_n_out; ++ } ++ ++ hdr = (struct espi_comm_hdr *)pkt; ++ ++ if (copy_from_user(pkt, (void __user *)ioc->pkt, ioc->pkt_len)) { ++ rc = -EFAULT; ++ goto free_n_out; ++ } ++ ++ /* ++ * common header (i.e. 
cycle type, tag, and length) ++ * part is written to HW registers ++ */ ++ if (flash->dma.enable) { ++ memcpy(flash->dma.tx_virt, hdr + 1, ioc->pkt_len - sizeof(*hdr)); ++ dma_wmb(); ++ } else { ++ for (i = sizeof(*hdr); i < ioc->pkt_len; ++i) ++ writel(pkt[i], espi->regs + ESPI_FLASH_TX_DATA); ++ } ++ ++ cyc = hdr->cyc; ++ tag = hdr->tag; ++ len = (hdr->len_h << 8) | (hdr->len_l & 0xff); ++ ++ reg = FIELD_PREP(ESPI_FLASH_TX_CTRL_CYC, cyc) ++ | FIELD_PREP(ESPI_FLASH_TX_CTRL_TAG, tag) ++ | FIELD_PREP(ESPI_FLASH_TX_CTRL_LEN, len) ++ | ESPI_FLASH_TX_CTRL_TRIG_PEND; ++ writel(reg, espi->regs + ESPI_FLASH_TX_CTRL); ++ ++ rc = 0; ++ ++free_n_out: ++ vfree(pkt); ++ ++unlock_mtx_n_out: ++ mutex_unlock(&flash->tx_mtx); ++ ++ return rc; ++} ++ ++static long ast2600_espi_flash_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) ++{ ++ struct ast2600_espi_flash *flash; ++ struct aspeed_espi_ioc ioc; ++ ++ flash = container_of(fp->private_data, struct ast2600_espi_flash, mdev); ++ ++ if (copy_from_user(&ioc, (void __user *)arg, sizeof(ioc))) ++ return -EFAULT; ++ ++ if (ioc.pkt_len > ESPI_MAX_PKT_LEN) ++ return -EINVAL; ++ ++ switch (cmd) { ++ case ASPEED_ESPI_FLASH_GET_RX: ++ return ast2600_espi_flash_get_rx(fp, flash, &ioc); ++ case ASPEED_ESPI_FLASH_PUT_TX: ++ return ast2600_espi_flash_put_tx(fp, flash, &ioc); ++ default: ++ break; ++ }; ++ ++ return -EINVAL; ++} ++ ++static const struct file_operations ast2600_espi_flash_fops = { ++ .owner = THIS_MODULE, ++ .unlocked_ioctl = ast2600_espi_flash_ioctl, ++}; ++ ++static void ast2600_espi_flash_isr(struct ast2600_espi *espi) ++{ ++ struct ast2600_espi_flash *flash; ++ unsigned long flags; ++ uint32_t sts; ++ ++ flash = &espi->flash; ++ ++ sts = readl(espi->regs + ESPI_INT_STS); ++ ++ if (sts & ESPI_INT_STS_FLASH_RX_CMPLT) { ++ writel(ESPI_INT_STS_FLASH_RX_CMPLT, espi->regs + ESPI_INT_STS); ++ ++ spin_lock_irqsave(&flash->lock, flags); ++ flash->rx_ready = true; ++ spin_unlock_irqrestore(&flash->lock, flags); ++ ++ wake_up_interruptible(&flash->wq); ++ } ++} ++ ++static void ast2600_espi_flash_reset(struct ast2600_espi *espi) ++{ ++ struct ast2600_espi_flash *flash; ++ uint32_t reg; ++ ++ flash = &espi->flash; ++ ++ writel(ESPI_INT_EN_FLASH, espi->regs + ESPI_INT_EN_CLR); ++ writel(ESPI_INT_STS_FLASH, espi->regs + ESPI_INT_STS); ++ ++ reg = readl(espi->regs + ESPI_CTRL); ++ reg &= ~(ESPI_CTRL_FLASH_TX_SW_RST ++ | ESPI_CTRL_FLASH_RX_SW_RST ++ | ESPI_CTRL_FLASH_TX_DMA_EN ++ | ESPI_CTRL_FLASH_RX_DMA_EN ++ | ESPI_CTRL_FLASH_SW_RDY); ++ writel(reg, espi->regs + ESPI_CTRL); ++ ++ udelay(1); ++ ++ reg |= (ESPI_CTRL_FLASH_TX_SW_RST | ESPI_CTRL_FLASH_RX_SW_RST); ++ writel(reg, espi->regs + ESPI_CTRL); ++ ++ reg = readl(espi->regs + ESPI_CTRL) & ~ESPI_CTRL_FLASH_SAFS_MODE; ++ reg |= FIELD_PREP(ESPI_CTRL_FLASH_SAFS_MODE, flash->safs.mode); ++ writel(reg, espi->regs + ESPI_CTRL); ++ ++ if (flash->safs.mode == SAFS_MODE_MIX) { ++ reg = FIELD_PREP(ESPI_FLASH_SAFS_TADDR_BASE, flash->safs.taddr >> 24) ++ | FIELD_PREP(ESPI_FLASH_SAFS_TADDR_MASK, (~(flash->safs.size - 1)) >> 24); ++ writel(reg, espi->regs + ESPI_FLASH_SAFS_TADDR); ++ } ++ ++ if (flash->dma.enable) { ++ writel(flash->dma.tx_addr, espi->regs + ESPI_FLASH_TX_DMA); ++ writel(flash->dma.rx_addr, espi->regs + ESPI_FLASH_RX_DMA); ++ ++ reg = readl(espi->regs + ESPI_CTRL) ++ | ESPI_CTRL_FLASH_TX_DMA_EN ++ | ESPI_CTRL_FLASH_RX_DMA_EN; ++ writel(reg, espi->regs + ESPI_CTRL); ++ } ++ ++ writel(ESPI_INT_EN_FLASH_RX_CMPLT, espi->regs + ESPI_INT_EN); ++ ++ reg = readl(espi->regs + ESPI_CTRL) | 
ESPI_CTRL_FLASH_SW_RDY; ++ writel(reg, espi->regs + ESPI_CTRL); ++} ++ ++static int ast2600_espi_flash_probe(struct ast2600_espi *espi) ++{ ++ struct ast2600_espi_flash *flash; ++ struct device *dev; ++ int rc; ++ ++ dev = espi->dev; ++ ++ flash = &espi->flash; ++ ++ init_waitqueue_head(&flash->wq); ++ ++ spin_lock_init(&flash->lock); ++ ++ mutex_init(&flash->tx_mtx); ++ mutex_init(&flash->rx_mtx); ++ ++ flash->safs.mode = SAFS_MODE_HW; ++ ++ of_property_read_u32(dev->of_node, "flash-safs-mode", &flash->safs.mode); ++ if (flash->safs.mode == SAFS_MODE_MIX) { ++ rc = of_property_read_u32(dev->of_node, "flash-safs-tgt-addr", &flash->safs.taddr); ++ if (rc || !IS_ALIGNED(flash->safs.taddr, FLASH_SAFS_ALIGN)) { ++ dev_err(dev, "cannot get 16MB-aligned SAFS target address\n"); ++ return -ENODEV; ++ } ++ ++ rc = of_property_read_u32(dev->of_node, "flash-safs-size", &flash->safs.size); ++ if (rc || !IS_ALIGNED(flash->safs.size, FLASH_SAFS_ALIGN)) { ++ dev_err(dev, "cannot get 16MB-aligned SAFS size\n"); ++ return -ENODEV; ++ } ++ } ++ ++ flash->dma.enable = of_property_read_bool(dev->of_node, "flash-dma-mode"); ++ if (flash->dma.enable) { ++ flash->dma.tx_virt = dmam_alloc_coherent(dev, PAGE_SIZE, &flash->dma.tx_addr, GFP_KERNEL); ++ if (!flash->dma.tx_virt) { ++ dev_err(dev, "cannot allocate DMA TX buffer\n"); ++ return -ENOMEM; ++ } ++ ++ flash->dma.rx_virt = dmam_alloc_coherent(dev, PAGE_SIZE, &flash->dma.rx_addr, GFP_KERNEL); ++ if (!flash->dma.rx_virt) { ++ dev_err(dev, "cannot allocate DMA RX buffer\n"); ++ return -ENOMEM; ++ } ++ } ++ ++ flash->mdev.parent = dev; ++ flash->mdev.minor = MISC_DYNAMIC_MINOR; ++ flash->mdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s-flash", DEVICE_NAME); ++ flash->mdev.fops = &ast2600_espi_flash_fops; ++ rc = misc_register(&flash->mdev); ++ if (rc) { ++ dev_err(dev, "cannot register device %s\n", flash->mdev.name); ++ return rc; ++ } ++ ++ ast2600_espi_flash_reset(espi); ++ ++ return 0; ++} ++ ++static int ast2600_espi_flash_remove(struct ast2600_espi *espi) ++{ ++ struct ast2600_espi_flash *flash; ++ struct device *dev; ++ uint32_t reg; ++ ++ dev = espi->dev; ++ ++ flash = &espi->flash; ++ ++ writel(ESPI_INT_EN_FLASH, espi->regs + ESPI_INT_EN_CLR); ++ ++ reg = readl(espi->regs + ESPI_CTRL); ++ reg &= ~(ESPI_CTRL_FLASH_TX_DMA_EN ++ | ESPI_CTRL_FLASH_RX_DMA_EN ++ | ESPI_CTRL_FLASH_SW_RDY); ++ writel(reg, espi->regs + ESPI_CTRL); ++ ++ if (flash->dma.enable) { ++ dmam_free_coherent(dev, PAGE_SIZE, flash->dma.tx_virt, flash->dma.tx_addr); ++ dmam_free_coherent(dev, PAGE_SIZE, flash->dma.rx_virt, flash->dma.rx_addr); ++ } ++ ++ mutex_destroy(&flash->tx_mtx); ++ mutex_destroy(&flash->rx_mtx); ++ ++ misc_deregister(&flash->mdev); ++ ++ return 0; ++} ++ ++/* global control */ ++static irqreturn_t ast2600_espi_isr(int irq, void *arg) ++{ ++ struct ast2600_espi *espi; ++ uint32_t sts; ++ ++ espi = (struct ast2600_espi *)arg; ++ ++ sts = readl(espi->regs + ESPI_INT_STS); ++ if (!sts) ++ return IRQ_NONE; ++ ++ if (sts & ESPI_INT_STS_PERIF) ++ ast2600_espi_perif_isr(espi); ++ ++ if (sts & ESPI_INT_STS_VW) ++ ast2600_espi_vw_isr(espi); ++ ++ if (sts & ESPI_INT_STS_OOB) ++ ast2600_espi_oob_isr(espi); ++ ++ if (sts & ESPI_INT_STS_FLASH) ++ ast2600_espi_flash_isr(espi); ++ ++ if (sts & ESPI_INT_STS_RST_DEASSERT) { ++ /* this will clear all interrupt enable and status */ ++ reset_control_assert(espi->rst); ++ reset_control_deassert(espi->rst); ++ ++ ast2600_espi_perif_sw_reset(espi); ++ ast2600_espi_perif_reset(espi); ++ ast2600_espi_vw_reset(espi); ++ 
ast2600_espi_oob_reset(espi); ++ ast2600_espi_flash_reset(espi); ++ ++ /* re-enable eSPI_RESET# interrupt */ ++ writel(ESPI_INT_EN_RST_DEASSERT, espi->regs + ESPI_INT_EN); ++ } ++ ++ return IRQ_HANDLED; ++} ++ ++static int ast2600_espi_probe(struct platform_device *pdev) ++{ ++ struct ast2600_espi *espi; ++ struct resource *res; ++ struct device *dev; ++ int rc; ++ ++ dev = &pdev->dev; ++ ++ espi = devm_kzalloc(dev, sizeof(*espi), GFP_KERNEL); ++ if (!espi) ++ return -ENOMEM; ++ ++ espi->dev = dev; ++ ++ rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); ++ if (rc) { ++ dev_err(dev, "cannot set 64-bit DMA mask\n"); ++ return rc; ++ } ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (!res) { ++ dev_err(dev, "cannot get resource\n"); ++ return -ENODEV; ++ } ++ ++ espi->regs = devm_ioremap_resource(dev, res); ++ if (IS_ERR(espi->regs)) { ++ dev_err(dev, "cannot map registers\n"); ++ return PTR_ERR(espi->regs); ++ } ++ ++ espi->irq = platform_get_irq(pdev, 0); ++ if (espi->irq < 0) { ++ dev_err(dev, "cannot get IRQ number\n"); ++ return -ENODEV; ++ } ++ ++ espi->rst = devm_reset_control_get_exclusive_by_index(dev, 0); ++ if (IS_ERR(espi->rst)) { ++ dev_err(dev, "cannot get reset control\n"); ++ return PTR_ERR(espi->rst); ++ } ++ ++ espi->clk = devm_clk_get(dev, NULL); ++ if (IS_ERR(espi->clk)) { ++ dev_err(dev, "cannot get clock control\n"); ++ return PTR_ERR(espi->clk); ++ } ++ ++ rc = clk_prepare_enable(espi->clk); ++ if (rc) { ++ dev_err(dev, "cannot enable clocks\n"); ++ return rc; ++ } ++ ++ writel(ESPI_INT_EN_RST_DEASSERT, espi->regs + ESPI_INT_EN_CLR); ++ ++ rc = ast2600_espi_perif_probe(espi); ++ if (rc) { ++ dev_err(dev, "cannot init peripheral channel, rc=%d\n", rc); ++ return rc; ++ } ++ ++ rc = ast2600_espi_vw_probe(espi); ++ if (rc) { ++ dev_err(dev, "cannot init vw channel, rc=%d\n", rc); ++ goto err_remove_perif; ++ } ++ ++ rc = ast2600_espi_oob_probe(espi); ++ if (rc) { ++ dev_err(dev, "cannot init oob channel, rc=%d\n", rc); ++ goto err_remove_vw; ++ } ++ ++ rc = ast2600_espi_flash_probe(espi); ++ if (rc) { ++ dev_err(dev, "cannot init flash channel, rc=%d\n", rc); ++ goto err_remove_oob; ++ } ++ ++ rc = devm_request_irq(dev, espi->irq, ast2600_espi_isr, 0, dev_name(dev), espi); ++ if (rc) { ++ dev_err(dev, "cannot request IRQ\n"); ++ goto err_remove_flash; ++ } ++ ++ writel(ESPI_INT_EN_RST_DEASSERT, espi->regs + ESPI_INT_EN); ++ ++ platform_set_drvdata(pdev, espi); ++ ++ dev_info(dev, "module loaded\n"); ++ ++ return 0; ++ ++err_remove_flash: ++ ast2600_espi_flash_remove(espi); ++err_remove_oob: ++ ast2600_espi_oob_remove(espi); ++err_remove_vw: ++ ast2600_espi_vw_remove(espi); ++err_remove_perif: ++ ast2600_espi_perif_remove(espi); ++ ++ return rc; ++} ++ ++static void ast2600_espi_remove(struct platform_device *pdev) ++{ ++ struct ast2600_espi *espi; ++ struct device *dev; ++ int rc; ++ ++ dev = &pdev->dev; ++ ++ espi = platform_get_drvdata(pdev); ++ ++ writel(ESPI_INT_EN_RST_DEASSERT, espi->regs + ESPI_INT_EN_CLR); ++ ++ rc = ast2600_espi_perif_remove(espi); ++ if (rc) ++ dev_warn(dev, "cannot remove peripheral channel, rc=%d\n", rc); ++ ++ rc = ast2600_espi_vw_remove(espi); ++ if (rc) ++ dev_warn(dev, "cannot remove vw channel, rc=%d\n", rc); ++ ++ rc = ast2600_espi_oob_remove(espi); ++ if (rc) ++ dev_warn(dev, "cannot remove oob channel, rc=%d\n", rc); ++ ++ rc = ast2600_espi_flash_remove(espi); ++ if (rc) ++ dev_warn(dev, "cannot remove flash channel, rc=%d\n", rc); ++} ++ ++static const struct of_device_id
ast2600_espi_of_matches[] = { ++ { .compatible = "aspeed,ast2600-espi" }, ++ { }, ++}; ++ ++static struct platform_driver ast2600_espi_driver = { ++ .driver = { ++ .name = "ast2600-espi", ++ .of_match_table = ast2600_espi_of_matches, ++ }, ++ .probe = ast2600_espi_probe, ++ .remove = ast2600_espi_remove, ++}; ++ ++module_platform_driver(ast2600_espi_driver); ++ ++MODULE_AUTHOR("Chia-Wei Wang "); ++MODULE_DESCRIPTION("Control of AST2600 eSPI Device"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/soc/aspeed/ast2600-espi.h b/drivers/soc/aspeed/ast2600-espi.h +--- a/drivers/soc/aspeed/ast2600-espi.h 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/soc/aspeed/ast2600-espi.h 2025-12-23 10:16:21.126032636 +0000 +@@ -0,0 +1,297 @@ ++/* SPDX-License-Identifier: GPL-2.0+ */ ++/* ++ * Copyright 2023 Aspeed Technology Inc. ++ */ ++#ifndef _AST2600_ESPI_H_ ++#define _AST2600_ESPI_H_ ++ ++#include ++#include "aspeed-espi-comm.h" ++ ++/* registers */ ++#define ESPI_CTRL 0x000 ++#define ESPI_CTRL_FLASH_TX_SW_RST BIT(31) ++#define ESPI_CTRL_FLASH_RX_SW_RST BIT(30) ++#define ESPI_CTRL_OOB_TX_SW_RST BIT(29) ++#define ESPI_CTRL_OOB_RX_SW_RST BIT(28) ++#define ESPI_CTRL_PERIF_NP_TX_SW_RST BIT(27) ++#define ESPI_CTRL_PERIF_NP_RX_SW_RST BIT(26) ++#define ESPI_CTRL_PERIF_PC_TX_SW_RST BIT(25) ++#define ESPI_CTRL_PERIF_PC_RX_SW_RST BIT(24) ++#define ESPI_CTRL_FLASH_TX_DMA_EN BIT(23) ++#define ESPI_CTRL_FLASH_RX_DMA_EN BIT(22) ++#define ESPI_CTRL_OOB_TX_DMA_EN BIT(21) ++#define ESPI_CTRL_OOB_RX_DMA_EN BIT(20) ++#define ESPI_CTRL_PERIF_NP_TX_DMA_EN BIT(19) ++#define ESPI_CTRL_PERIF_PC_TX_DMA_EN BIT(17) ++#define ESPI_CTRL_PERIF_PC_RX_DMA_EN BIT(16) ++#define ESPI_CTRL_FLASH_SAFS_MODE GENMASK(11, 10) ++#define ESPI_CTRL_VW_GPIO_SW BIT(9) ++#define ESPI_CTRL_FLASH_SW_RDY BIT(7) ++#define ESPI_CTRL_OOB_SW_RDY BIT(4) ++#define ESPI_CTRL_VW_SW_RDY BIT(3) ++#define ESPI_CTRL_PERIF_SW_RDY BIT(1) ++#define ESPI_STS 0x004 ++#define ESPI_INT_STS 0x008 ++#define ESPI_INT_STS_RST_DEASSERT BIT(31) ++#define ESPI_INT_STS_OOB_RX_TMOUT BIT(23) ++#define ESPI_INT_STS_VW_SYSEVT1 BIT(22) ++#define ESPI_INT_STS_FLASH_TX_ERR BIT(21) ++#define ESPI_INT_STS_OOB_TX_ERR BIT(20) ++#define ESPI_INT_STS_FLASH_TX_ABT BIT(19) ++#define ESPI_INT_STS_OOB_TX_ABT BIT(18) ++#define ESPI_INT_STS_PERIF_NP_TX_ABT BIT(17) ++#define ESPI_INT_STS_PERIF_PC_TX_ABT BIT(16) ++#define ESPI_INT_STS_FLASH_RX_ABT BIT(15) ++#define ESPI_INT_STS_OOB_RX_ABT BIT(14) ++#define ESPI_INT_STS_PERIF_NP_RX_ABT BIT(13) ++#define ESPI_INT_STS_PERIF_PC_RX_ABT BIT(12) ++#define ESPI_INT_STS_PERIF_NP_TX_ERR BIT(11) ++#define ESPI_INT_STS_PERIF_PC_TX_ERR BIT(10) ++#define ESPI_INT_STS_VW_GPIO BIT(9) ++#define ESPI_INT_STS_VW_SYSEVT BIT(8) ++#define ESPI_INT_STS_FLASH_TX_CMPLT BIT(7) ++#define ESPI_INT_STS_FLASH_RX_CMPLT BIT(6) ++#define ESPI_INT_STS_OOB_TX_CMPLT BIT(5) ++#define ESPI_INT_STS_OOB_RX_CMPLT BIT(4) ++#define ESPI_INT_STS_PERIF_NP_TX_CMPLT BIT(3) ++#define ESPI_INT_STS_PERIF_PC_TX_CMPLT BIT(1) ++#define ESPI_INT_STS_PERIF_PC_RX_CMPLT BIT(0) ++#define ESPI_INT_EN 0x00c ++#define ESPI_INT_EN_RST_DEASSERT BIT(31) ++#define ESPI_INT_EN_OOB_RX_TMOUT BIT(23) ++#define ESPI_INT_EN_VW_SYSEVT1 BIT(22) ++#define ESPI_INT_EN_FLASH_TX_ERR BIT(21) ++#define ESPI_INT_EN_OOB_TX_ERR BIT(20) ++#define ESPI_INT_EN_FLASH_TX_ABT BIT(19) ++#define ESPI_INT_EN_OOB_TX_ABT BIT(18) ++#define ESPI_INT_EN_PERIF_NP_TX_ABT BIT(17) ++#define ESPI_INT_EN_PERIF_PC_TX_ABT BIT(16) ++#define ESPI_INT_EN_FLASH_RX_ABT BIT(15) ++#define ESPI_INT_EN_OOB_RX_ABT BIT(14) ++#define 
ESPI_INT_EN_PERIF_NP_RX_ABT BIT(13) ++#define ESPI_INT_EN_PERIF_PC_RX_ABT BIT(12) ++#define ESPI_INT_EN_PERIF_NP_TX_ERR BIT(11) ++#define ESPI_INT_EN_PERIF_PC_TX_ERR BIT(10) ++#define ESPI_INT_EN_VW_GPIO BIT(9) ++#define ESPI_INT_EN_VW_SYSEVT BIT(8) ++#define ESPI_INT_EN_FLASH_TX_CMPLT BIT(7) ++#define ESPI_INT_EN_FLASH_RX_CMPLT BIT(6) ++#define ESPI_INT_EN_OOB_TX_CMPLT BIT(5) ++#define ESPI_INT_EN_OOB_RX_CMPLT BIT(4) ++#define ESPI_INT_EN_PERIF_NP_TX_CMPLT BIT(3) ++#define ESPI_INT_EN_PERIF_PC_TX_CMPLT BIT(1) ++#define ESPI_INT_EN_PERIF_PC_RX_CMPLT BIT(0) ++#define ESPI_PERIF_PC_RX_DMA 0x010 ++#define ESPI_PERIF_PC_RX_CTRL 0x014 ++#define ESPI_PERIF_PC_RX_CTRL_SERV_PEND BIT(31) ++#define ESPI_PERIF_PC_RX_CTRL_LEN GENMASK(23, 12) ++#define ESPI_PERIF_PC_RX_CTRL_TAG GENMASK(11, 8) ++#define ESPI_PERIF_PC_RX_CTRL_CYC GENMASK(7, 0) ++#define ESPI_PERIF_PC_RX_DATA 0x018 ++#define ESPI_PERIF_PC_TX_DMA 0x020 ++#define ESPI_PERIF_PC_TX_CTRL 0x024 ++#define ESPI_PERIF_PC_TX_CTRL_TRIG_PEND BIT(31) ++#define ESPI_PERIF_PC_TX_CTRL_LEN GENMASK(23, 12) ++#define ESPI_PERIF_PC_TX_CTRL_TAG GENMASK(11, 8) ++#define ESPI_PERIF_PC_TX_CTRL_CYC GENMASK(7, 0) ++#define ESPI_PERIF_PC_TX_DATA 0x028 ++#define ESPI_PERIF_NP_TX_DMA 0x030 ++#define ESPI_PERIF_NP_TX_CTRL 0x034 ++#define ESPI_PERIF_NP_TX_CTRL_TRIG_PEND BIT(31) ++#define ESPI_PERIF_NP_TX_CTRL_LEN GENMASK(23, 12) ++#define ESPI_PERIF_NP_TX_CTRL_TAG GENMASK(11, 8) ++#define ESPI_PERIF_NP_TX_CTRL_CYC GENMASK(7, 0) ++#define ESPI_PERIF_NP_TX_DATA 0x038 ++#define ESPI_OOB_RX_DMA 0x040 ++#define ESPI_OOB_RX_CTRL 0x044 ++#define ESPI_OOB_RX_CTRL_SERV_PEND BIT(31) ++#define ESPI_OOB_RX_CTRL_LEN GENMASK(23, 12) ++#define ESPI_OOB_RX_CTRL_TAG GENMASK(11, 8) ++#define ESPI_OOB_RX_CTRL_CYC GENMASK(7, 0) ++#define ESPI_OOB_RX_DATA 0x048 ++#define ESPI_OOB_TX_DMA 0x050 ++#define ESPI_OOB_TX_CTRL 0x054 ++#define ESPI_OOB_TX_CTRL_TRIG_PEND BIT(31) ++#define ESPI_OOB_TX_CTRL_LEN GENMASK(23, 12) ++#define ESPI_OOB_TX_CTRL_TAG GENMASK(11, 8) ++#define ESPI_OOB_TX_CTRL_CYC GENMASK(7, 0) ++#define ESPI_OOB_TX_DATA 0x058 ++#define ESPI_FLASH_RX_DMA 0x060 ++#define ESPI_FLASH_RX_CTRL 0x064 ++#define ESPI_FLASH_RX_CTRL_SERV_PEND BIT(31) ++#define ESPI_FLASH_RX_CTRL_LEN GENMASK(23, 12) ++#define ESPI_FLASH_RX_CTRL_TAG GENMASK(11, 8) ++#define ESPI_FLASH_RX_CTRL_CYC GENMASK(7, 0) ++#define ESPI_FLASH_RX_DATA 0x068 ++#define ESPI_FLASH_TX_DMA 0x070 ++#define ESPI_FLASH_TX_CTRL 0x074 ++#define ESPI_FLASH_TX_CTRL_TRIG_PEND BIT(31) ++#define ESPI_FLASH_TX_CTRL_LEN GENMASK(23, 12) ++#define ESPI_FLASH_TX_CTRL_TAG GENMASK(11, 8) ++#define ESPI_FLASH_TX_CTRL_CYC GENMASK(7, 0) ++#define ESPI_FLASH_TX_DATA 0x078 ++#define ESPI_CTRL2 0x080 ++#define ESPI_CTRL2_VW_TX_SORT BIT(30) ++#define ESPI_CTRL2_MCYC_RD_DIS_WDT BIT(11) ++#define ESPI_CTRL2_MCYC_WR_DIS_WDT BIT(10) ++#define ESPI_CTRL2_MCYC_RD_DIS BIT(6) ++#define ESPI_CTRL2_MMBI_RD_DIS ESPI_CTRL2_MCYC_RD_DIS ++#define ESPI_CTRL2_MCYC_WR_DIS BIT(4) ++#define ESPI_CTRL2_MMBI_WR_DIS ESPI_CTRL2_MCYC_WR_DIS ++#define ESPI_PERIF_MCYC_SADDR 0x084 ++#define ESPI_PERIF_MMBI_SADDR ESPI_PERIF_MCYC_SADDR ++#define ESPI_PERIF_MCYC_TADDR 0x088 ++#define ESPI_PERIF_MMBI_TADDR ESPI_PERIF_MCYC_TADDR ++#define ESPI_PERIF_MCYC_MASK 0x08c ++#define ESPI_PERIF_MMBI_MASK ESPI_PERIF_MCYC_MASK ++#define ESPI_FLASH_SAFS_TADDR 0x090 ++#define ESPI_FLASH_SAFS_TADDR_BASE GENMASK(31, 24) ++#define ESPI_FLASH_SAFS_TADDR_MASK GENMASK(15, 8) ++#define ESPI_VW_SYSEVT_INT_EN 0x094 ++#define ESPI_VW_SYSEVT 0x098 ++#define ESPI_VW_SYSEVT_HOST_RST_ACK BIT(27) ++#define 
ESPI_VW_SYSEVT_RST_CPU_INIT BIT(26) ++#define ESPI_VW_SYSEVT_SLV_BOOT_STS BIT(23) ++#define ESPI_VW_SYSEVT_NON_FATAL_ERR BIT(22) ++#define ESPI_VW_SYSEVT_FATAL_ERR BIT(21) ++#define ESPI_VW_SYSEVT_SLV_BOOT_DONE BIT(20) ++#define ESPI_VW_SYSEVT_OOB_RST_ACK BIT(16) ++#define ESPI_VW_SYSEVT_NMI_OUT BIT(10) ++#define ESPI_VW_SYSEVT_SMI_OUT BIT(9) ++#define ESPI_VW_SYSEVT_HOST_RST_WARN BIT(8) ++#define ESPI_VW_SYSEVT_OOB_RST_WARN BIT(6) ++#define ESPI_VW_SYSEVT_PLTRSTN BIT(5) ++#define ESPI_VW_SYSEVT_SUSPEND BIT(4) ++#define ESPI_VW_SYSEVT_S5_SLEEP BIT(2) ++#define ESPI_VW_SYSEVT_S4_SLEEP BIT(1) ++#define ESPI_VW_SYSEVT_S3_SLEEP BIT(0) ++#define ESPI_VW_GPIO_VAL 0x09c ++#define ESPI_GEN_CAP_N_CONF 0x0a0 ++#define ESPI_CH0_CAP_N_CONF 0x0a4 ++#define ESPI_CH1_CAP_N_CONF 0x0a8 ++#define ESPI_CH2_CAP_N_CONF 0x0ac ++#define ESPI_CH3_CAP_N_CONF 0x0b0 ++#define ESPI_CH3_CAP_N_CONF2 0x0b4 ++#define ESPI_VW_GPIO_DIR 0x0c0 ++#define ESPI_VW_GPIO_GRP 0x0c4 ++#define ESPI_INT_EN_CLR 0x0fc ++#define ESPI_VW_SYSEVT1_INT_EN 0x100 ++#define ESPI_VW_SYSEVT1 0x104 ++#define ESPI_VW_SYSEVT1_SUSPEND_ACK BIT(20) ++#define ESPI_VW_SYSEVT1_SUSPEND_WARN BIT(0) ++#define ESPI_VW_SYSEVT_INT_T0 0x110 ++#define ESPI_VW_SYSEVT_INT_T1 0x114 ++#define ESPI_VW_SYSEVT_INT_T2 0x118 ++#define ESPI_VW_SYSEVT_INT_STS 0x11c ++#define ESPI_VW_SYSEVT1_INT_T0 0x120 ++#define ESPI_VW_SYSEVT1_INT_T1 0x124 ++#define ESPI_VW_SYSEVT1_INT_T2 0x128 ++#define ESPI_VW_SYSEVT1_INT_STS 0x12c ++#define ESPI_OOB_RX_DESC_NUM 0x130 ++#define ESPI_OOB_RX_DESC_RPTR 0x134 ++#define ESPI_OOB_RX_DESC_RPTR_UPDATE BIT(31) ++#define ESPI_OOB_RX_DESC_RPTR_RP GENMASK(11, 0) ++#define ESPI_OOB_RX_DESC_WPTR 0x138 ++#define ESPI_OOB_RX_DESC_WPTR_RECV_EN BIT(31) ++#define ESPI_OOB_RX_DESC_WPTR_SP GENMASK(27, 16) ++#define ESPI_OOB_RX_DESC_WPTR_WP GENMASK(11, 0) ++#define ESPI_OOB_TX_DESC_NUM 0x140 ++#define ESPI_OOB_TX_DESC_RPTR 0x144 ++#define ESPI_OOB_TX_DESC_RPTR_UPDATE BIT(31) ++#define ESPI_OOB_TX_DESC_WPTR 0x148 ++#define ESPI_OOB_TX_DESC_WPTR_SEND_EN BIT(31) ++#define ESPI_MMBI_CTRL 0x800 ++#define ESPI_MMBI_CTRL_INST_SZ GENMASK(10, 8) ++#define ESPI_MMBI_CTRL_TOTAL_SZ GENMASK(6, 4) ++#define ESPI_MMBI_CTRL_EN BIT(0) ++#define ESPI_MMBI_INT_STS 0x808 ++#define ESPI_MMBI_INT_EN 0x80c ++#define ESPI_MMBI_HOST_RWP(x) (0x810 + ((x) << 3)) ++ ++/* collect ESPI_INT_EN bits for convenience */ ++#define ESPI_INT_EN_PERIF \ ++ (ESPI_INT_EN_PERIF_NP_TX_ABT | \ ++ ESPI_INT_EN_PERIF_PC_TX_ABT | \ ++ ESPI_INT_EN_PERIF_NP_RX_ABT | \ ++ ESPI_INT_EN_PERIF_PC_RX_ABT | \ ++ ESPI_INT_EN_PERIF_NP_TX_ERR | \ ++ ESPI_INT_EN_PERIF_PC_TX_ERR | \ ++ ESPI_INT_EN_PERIF_NP_TX_CMPLT | \ ++ ESPI_INT_EN_PERIF_PC_TX_CMPLT | \ ++ ESPI_INT_EN_PERIF_PC_RX_CMPLT) ++ ++#define ESPI_INT_EN_VW \ ++ (ESPI_INT_EN_VW_SYSEVT1 | \ ++ ESPI_INT_EN_VW_GPIO | \ ++ ESPI_INT_EN_VW_SYSEVT) ++ ++#define ESPI_INT_EN_OOB \ ++ (ESPI_INT_EN_OOB_RX_TMOUT | \ ++ ESPI_INT_EN_OOB_TX_ERR | \ ++ ESPI_INT_EN_OOB_TX_ABT | \ ++ ESPI_INT_EN_OOB_RX_ABT | \ ++ ESPI_INT_EN_OOB_TX_CMPLT | \ ++ ESPI_INT_EN_OOB_RX_CMPLT) ++ ++#define ESPI_INT_EN_FLASH \ ++ (ESPI_INT_EN_FLASH_TX_ERR | \ ++ ESPI_INT_EN_FLASH_TX_ABT | \ ++ ESPI_INT_EN_FLASH_RX_ABT | \ ++ ESPI_INT_EN_FLASH_TX_CMPLT | \ ++ ESPI_INT_EN_FLASH_RX_CMPLT) ++ ++/* collect ESPI_INT_STS bits for convenience */ ++#define ESPI_INT_STS_PERIF \ ++ (ESPI_INT_STS_PERIF_NP_TX_ABT | \ ++ ESPI_INT_STS_PERIF_PC_TX_ABT | \ ++ ESPI_INT_STS_PERIF_NP_RX_ABT | \ ++ ESPI_INT_STS_PERIF_PC_RX_ABT | \ ++ ESPI_INT_STS_PERIF_NP_TX_ERR | \ ++ ESPI_INT_STS_PERIF_PC_TX_ERR | \ ++ 
ESPI_INT_STS_PERIF_NP_TX_CMPLT | \ ++ ESPI_INT_STS_PERIF_PC_TX_CMPLT | \ ++ ESPI_INT_STS_PERIF_PC_RX_CMPLT) ++ ++#define ESPI_INT_STS_VW \ ++ (ESPI_INT_STS_VW_SYSEVT1 | \ ++ ESPI_INT_STS_VW_GPIO | \ ++ ESPI_INT_STS_VW_SYSEVT) ++ ++#define ESPI_INT_STS_OOB \ ++ (ESPI_INT_STS_OOB_RX_TMOUT | \ ++ ESPI_INT_STS_OOB_TX_ERR | \ ++ ESPI_INT_STS_OOB_TX_ABT | \ ++ ESPI_INT_STS_OOB_RX_ABT | \ ++ ESPI_INT_STS_OOB_TX_CMPLT | \ ++ ESPI_INT_STS_OOB_RX_CMPLT) ++ ++#define ESPI_INT_STS_FLASH \ ++ (ESPI_INT_STS_FLASH_TX_ERR | \ ++ ESPI_INT_STS_FLASH_TX_ABT | \ ++ ESPI_INT_STS_FLASH_RX_ABT | \ ++ ESPI_INT_STS_FLASH_TX_CMPLT | \ ++ ESPI_INT_STS_FLASH_RX_CMPLT) ++ ++/* consistent with DTS property "flash-safs-mode" */ ++enum ast2600_safs_mode { ++ SAFS_MODE_MIX = 0x0, ++ SAFS_MODE_SW, ++ SAFS_MODE_HW, ++ SAFS_MODES, ++}; ++ ++/* consistent with DTS property "perif-mmbi-instance-size" */ ++enum ast2600_mmbi_instance_size { ++ MMBI_INST_SIZE_8KB = 0x0, ++ MMBI_INST_SIZE_16KB, ++ MMBI_INST_SIZE_32KB, ++ MMBI_INST_SIZE_64KB, ++ MMBI_INST_SIZE_128KB, ++ MMBI_INST_SIZE_256KB, ++ MMBI_INST_SIZE_512KB, ++ MMBI_INST_SIZE_1024KB, ++ MMBI_INST_SIZE_TYPES, ++}; ++ ++#endif +diff --git a/drivers/soc/aspeed/ast2600-otp.c b/drivers/soc/aspeed/ast2600-otp.c +--- a/drivers/soc/aspeed/ast2600-otp.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/soc/aspeed/ast2600-otp.c 2025-12-23 10:16:21.126032636 +0000 +@@ -0,0 +1,638 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright (C) ASPEED Technology Inc. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define ASPEED_REVISION_ID0 0x04 ++#define ASPEED_REVISION_ID1 0x14 ++#define ID0_AST2600A0 0x05000303 ++#define ID1_AST2600A0 0x05000303 ++#define ID0_AST2600A1 0x05010303 ++#define ID1_AST2600A1 0x05010303 ++#define ID0_AST2600A2 0x05010303 ++#define ID1_AST2600A2 0x05020303 ++#define ID0_AST2600A3 0x05030303 ++#define ID1_AST2600A3 0x05030303 ++#define ID0_AST2620A1 0x05010203 ++#define ID1_AST2620A1 0x05010203 ++#define ID0_AST2620A2 0x05010203 ++#define ID1_AST2620A2 0x05020203 ++#define ID0_AST2620A3 0x05030203 ++#define ID1_AST2620A3 0x05030203 ++#define ID0_AST2605A2 0x05010103 ++#define ID1_AST2605A2 0x05020103 ++#define ID0_AST2605A3 0x05030103 ++#define ID1_AST2605A3 0x05030103 ++#define ID0_AST2625A3 0x05030403 ++#define ID1_AST2625A3 0x05030403 ++ ++#define OTP_PROTECT_KEY 0x0 ++#define OTP_PASSWD 0x349fe38a ++#define OTP_COMMAND 0x4 ++#define OTP_TIMING 0x8 ++#define OTP_ADDR 0x10 ++#define OTP_STATUS 0x14 ++#define OTP_COMPARE_1 0x20 ++#define OTP_COMPARE_2 0x24 ++#define OTP_COMPARE_3 0x28 ++#define OTP_COMPARE_4 0x2c ++#define SW_REV_ID0 0x68 ++#define SW_REV_ID1 0x6c ++#define SEC_KEY_NUM 0x78 ++#define RETRY 20 ++ ++struct aspeed_otp { ++ struct miscdevice miscdev; ++ void __iomem *reg_base; ++ bool is_open; ++ u32 otp_ver; ++ u32 *data; ++}; ++ ++static DEFINE_SPINLOCK(otp_state_lock); ++ ++static inline u32 aspeed_otp_read(struct aspeed_otp *ctx, u32 reg) ++{ ++ int val; ++ ++ val = readl(ctx->reg_base + reg); ++ // printk("read:reg = 0x%08x, val = 0x%08x\n", reg, val); ++ return val; ++} ++ ++static inline void aspeed_otp_write(struct aspeed_otp *ctx, u32 val, u32 reg) ++{ ++ // printk("write:reg = 0x%08x, val = 0x%08x\n", reg, val); ++ writel(val, ctx->reg_base + reg); ++} ++ ++static uint32_t chip_version(u32 revid0, u32 revid1) ++{ ++ if (revid0 == ID0_AST2600A0 && revid1 == ID1_AST2600A0) { ++ /* AST2600-A0 */ ++ return OTP_A0; ++ 
} else if (revid0 == ID0_AST2600A1 && revid1 == ID1_AST2600A1) { ++ /* AST2600-A1 */ ++ return OTP_A1; ++ } else if (revid0 == ID0_AST2600A2 && revid1 == ID1_AST2600A2) { ++ /* AST2600-A2 */ ++ return OTP_A2; ++ } else if (revid0 == ID0_AST2600A3 && revid1 == ID1_AST2600A3) { ++ /* AST2600-A3 */ ++ return OTP_A3; ++ } else if (revid0 == ID0_AST2620A1 && revid1 == ID1_AST2620A1) { ++ /* AST2620-A1 */ ++ return OTP_A1; ++ } else if (revid0 == ID0_AST2620A2 && revid1 == ID1_AST2620A2) { ++ /* AST2620-A2 */ ++ return OTP_A2; ++ } else if (revid0 == ID0_AST2620A3 && revid1 == ID1_AST2620A3) { ++ /* AST2620-A3 */ ++ return OTP_A3; ++ } else if (revid0 == ID0_AST2605A2 && revid1 == ID1_AST2605A2) { ++ /* AST2605-A2 */ ++ return OTP_A2; ++ } else if (revid0 == ID0_AST2605A3 && revid1 == ID1_AST2605A3) { ++ /* AST2605-A3 */ ++ return OTP_A3; ++ } else if (revid0 == ID0_AST2625A3 && revid1 == ID1_AST2625A3) { ++ /* AST2605-A3 */ ++ return OTP_A3; ++ } ++ return -1; ++} ++ ++static void wait_complete(struct aspeed_otp *ctx) ++{ ++ int reg; ++ int i = 0; ++ ++ do { ++ reg = aspeed_otp_read(ctx, OTP_STATUS); ++ if ((reg & 0x6) == 0x6) ++ i++; ++ } while (i != 2); ++} ++ ++static void otp_write(struct aspeed_otp *ctx, u32 otp_addr, u32 val) ++{ ++ aspeed_otp_write(ctx, otp_addr, OTP_ADDR); //write address ++ aspeed_otp_write(ctx, val, OTP_COMPARE_1); //write val ++ aspeed_otp_write(ctx, 0x23b1e362, OTP_COMMAND); //write command ++ wait_complete(ctx); ++} ++ ++static void otp_soak(struct aspeed_otp *ctx, int soak) ++{ ++ if (ctx->otp_ver == OTP_A2 || ctx->otp_ver == OTP_A3) { ++ switch (soak) { ++ case 0: //default ++ otp_write(ctx, 0x3000, 0x0); // Write MRA ++ otp_write(ctx, 0x5000, 0x0); // Write MRB ++ otp_write(ctx, 0x1000, 0x0); // Write MR ++ break; ++ case 1: //normal program ++ otp_write(ctx, 0x3000, 0x1320); // Write MRA ++ otp_write(ctx, 0x5000, 0x1008); // Write MRB ++ otp_write(ctx, 0x1000, 0x0024); // Write MR ++ aspeed_otp_write(ctx, 0x04191388, OTP_TIMING); // 200us ++ break; ++ case 2: //soak program ++ otp_write(ctx, 0x3000, 0x1320); // Write MRA ++ otp_write(ctx, 0x5000, 0x0007); // Write MRB ++ otp_write(ctx, 0x1000, 0x0100); // Write MR ++ aspeed_otp_write(ctx, 0x04193a98, OTP_TIMING); // 600us ++ break; ++ } ++ } else { ++ switch (soak) { ++ case 0: //default ++ otp_write(ctx, 0x3000, 0x0); // Write MRA ++ otp_write(ctx, 0x5000, 0x0); // Write MRB ++ otp_write(ctx, 0x1000, 0x0); // Write MR ++ break; ++ case 1: //normal program ++ otp_write(ctx, 0x3000, 0x4021); // Write MRA ++ otp_write(ctx, 0x5000, 0x302f); // Write MRB ++ otp_write(ctx, 0x1000, 0x4020); // Write MR ++ aspeed_otp_write(ctx, 0x04190760, OTP_TIMING); // 75us ++ break; ++ case 2: //soak program ++ otp_write(ctx, 0x3000, 0x4021); // Write MRA ++ otp_write(ctx, 0x5000, 0x1027); // Write MRB ++ otp_write(ctx, 0x1000, 0x4820); // Write MR ++ aspeed_otp_write(ctx, 0x041930d4, OTP_TIMING); // 500us ++ break; ++ } ++ } ++ ++ wait_complete(ctx); ++} ++ ++static int verify_bit(struct aspeed_otp *ctx, u32 otp_addr, int bit_offset, int value) ++{ ++ u32 ret[2]; ++ ++ if (otp_addr % 2 == 0) ++ aspeed_otp_write(ctx, otp_addr, OTP_ADDR); //Read address ++ else ++ aspeed_otp_write(ctx, otp_addr - 1, OTP_ADDR); //Read address ++ ++ aspeed_otp_write(ctx, 0x23b1e361, OTP_COMMAND); //trigger read ++ wait_complete(ctx); ++ ret[0] = aspeed_otp_read(ctx, OTP_COMPARE_1); ++ ret[1] = aspeed_otp_read(ctx, OTP_COMPARE_2); ++ ++ if (otp_addr % 2 == 0) { ++ if (((ret[0] >> bit_offset) & 1) == value) ++ return 0; ++ else ++ return -1; ++ } else 
{ ++ if (((ret[1] >> bit_offset) & 1) == value) ++ return 0; ++ else ++ return -1; ++ } ++} ++ ++static void otp_prog(struct aspeed_otp *ctx, u32 otp_addr, u32 prog_bit) ++{ ++ otp_write(ctx, 0x0, prog_bit); ++ aspeed_otp_write(ctx, otp_addr, OTP_ADDR); //write address ++ aspeed_otp_write(ctx, prog_bit, OTP_COMPARE_1); //write data ++ aspeed_otp_write(ctx, 0x23b1e364, OTP_COMMAND); //write command ++ wait_complete(ctx); ++} ++ ++static void _otp_prog_bit(struct aspeed_otp *ctx, u32 value, u32 prog_address, u32 bit_offset) ++{ ++ int prog_bit; ++ ++ if (prog_address % 2 == 0) { ++ if (value) ++ prog_bit = ~(0x1 << bit_offset); ++ else ++ return; ++ } else { ++ if (ctx->otp_ver != OTP_A3) ++ prog_address |= 1 << 15; ++ if (!value) ++ prog_bit = 0x1 << bit_offset; ++ else ++ return; ++ } ++ otp_prog(ctx, prog_address, prog_bit); ++} ++ ++static int otp_prog_bit(struct aspeed_otp *ctx, u32 value, u32 prog_address, u32 bit_offset) ++{ ++ int pass; ++ int i; ++ ++ otp_soak(ctx, 1); ++ _otp_prog_bit(ctx, value, prog_address, bit_offset); ++ pass = 0; ++ ++ for (i = 0; i < RETRY; i++) { ++ if (verify_bit(ctx, prog_address, bit_offset, value) != 0) { ++ otp_soak(ctx, 2); ++ _otp_prog_bit(ctx, value, prog_address, bit_offset); ++ if (verify_bit(ctx, prog_address, bit_offset, value) != 0) { ++ otp_soak(ctx, 1); ++ } else { ++ pass = 1; ++ break; ++ } ++ } else { ++ pass = 1; ++ break; ++ } ++ } ++ otp_soak(ctx, 0); ++ return pass; ++} ++ ++static void otp_read_conf_dw(struct aspeed_otp *ctx, u32 offset, u32 *buf) ++{ ++ u32 config_offset; ++ ++ config_offset = 0x800; ++ config_offset |= (offset / 8) * 0x200; ++ config_offset |= (offset % 8) * 0x2; ++ ++ aspeed_otp_write(ctx, config_offset, OTP_ADDR); //Read address ++ aspeed_otp_write(ctx, 0x23b1e361, OTP_COMMAND); //trigger read ++ wait_complete(ctx); ++ buf[0] = aspeed_otp_read(ctx, OTP_COMPARE_1); ++} ++ ++static void otp_read_conf(struct aspeed_otp *ctx, u32 offset, u32 len) ++{ ++ int i, j; ++ ++ otp_soak(ctx, 0); ++ for (i = offset, j = 0; j < len; i++, j++) ++ otp_read_conf_dw(ctx, i, &ctx->data[j]); ++} ++ ++static void otp_read_data_2dw(struct aspeed_otp *ctx, u32 offset, u32 *buf) ++{ ++ aspeed_otp_write(ctx, offset, OTP_ADDR); //Read address ++ aspeed_otp_write(ctx, 0x23b1e361, OTP_COMMAND); //trigger read ++ wait_complete(ctx); ++ buf[0] = aspeed_otp_read(ctx, OTP_COMPARE_1); ++ buf[1] = aspeed_otp_read(ctx, OTP_COMPARE_2); ++} ++ ++static void otp_read_data(struct aspeed_otp *ctx, u32 offset, u32 len) ++{ ++ int i, j; ++ u32 ret[2]; ++ ++ otp_soak(ctx, 0); ++ ++ i = offset; ++ j = 0; ++ if (offset % 2) { ++ otp_read_data_2dw(ctx, i - 1, ret); ++ ctx->data[0] = ret[1]; ++ i++; ++ j++; ++ } ++ for (; j < len; i += 2, j += 2) ++ otp_read_data_2dw(ctx, i, &ctx->data[j]); ++} ++ ++static int otp_prog_data(struct aspeed_otp *ctx, u32 value, u32 dw_offset, u32 bit_offset) ++{ ++ u32 read[2]; ++ int otp_bit; ++ ++ if (dw_offset % 2 == 0) { ++ otp_read_data_2dw(ctx, dw_offset, read); ++ otp_bit = (read[0] >> bit_offset) & 0x1; ++ ++ if (otp_bit == 1 && value == 0) { ++ pr_err("OTPDATA%X[%X] = 1\n", dw_offset, bit_offset); ++ pr_err("OTP is programed, which can't be cleaned\n"); ++ return -EINVAL; ++ } ++ } else { ++ otp_read_data_2dw(ctx, dw_offset - 1, read); ++ otp_bit = (read[1] >> bit_offset) & 0x1; ++ ++ if (otp_bit == 0 && value == 1) { ++ pr_err("OTPDATA%X[%X] = 1\n", dw_offset, bit_offset); ++ pr_err("OTP is programed, which can't be writen\n"); ++ return -EINVAL; ++ } ++ } ++ if (otp_bit == value) { ++ pr_err("OTPDATA%X[%X] = %d\n", 
dw_offset, bit_offset, value); ++ pr_err("No need to program\n"); ++ return 0; ++ } ++ ++ return otp_prog_bit(ctx, value, dw_offset, bit_offset); ++} ++ ++static int otp_prog_conf(struct aspeed_otp *ctx, u32 value, u32 dw_offset, u32 bit_offset) ++{ ++ u32 read; ++ u32 prog_address = 0; ++ int otp_bit; ++ ++ otp_read_conf_dw(ctx, dw_offset, &read); ++ ++ prog_address = 0x800; ++ prog_address |= (dw_offset / 8) * 0x200; ++ prog_address |= (dw_offset % 8) * 0x2; ++ otp_bit = (read >> bit_offset) & 0x1; ++ if (otp_bit == value) { ++ pr_err("OTPCFG%X[%X] = %d\n", dw_offset, bit_offset, value); ++ pr_err("No need to program\n"); ++ return 0; ++ } ++ if (otp_bit == 1 && value == 0) { ++ pr_err("OTPCFG%X[%X] = 1\n", dw_offset, bit_offset); ++ pr_err("OTP is programed, which can't be clean\n"); ++ return -EINVAL; ++ } ++ ++ return otp_prog_bit(ctx, value, prog_address, bit_offset); ++} ++ ++struct aspeed_otp *glob_ctx; ++ ++void otp_read_data_buf(u32 offset, u32 *buf, u32 len) ++{ ++ int i, j; ++ u32 ret[2]; ++ ++ aspeed_otp_write(glob_ctx, OTP_PASSWD, OTP_PROTECT_KEY); ++ ++ otp_soak(glob_ctx, 0); ++ ++ i = offset; ++ j = 0; ++ if (offset % 2) { ++ otp_read_data_2dw(glob_ctx, i - 1, ret); ++ buf[0] = ret[1]; ++ i++; ++ j++; ++ } ++ for (; j < len; i += 2, j += 2) ++ otp_read_data_2dw(glob_ctx, i, &buf[j]); ++ aspeed_otp_write(glob_ctx, 0, OTP_PROTECT_KEY); ++} ++EXPORT_SYMBOL(otp_read_data_buf); ++ ++void otp_read_conf_buf(u32 offset, u32 *buf, u32 len) ++{ ++ int i, j; ++ ++ aspeed_otp_write(glob_ctx, OTP_PASSWD, OTP_PROTECT_KEY); ++ otp_soak(glob_ctx, 0); ++ for (i = offset, j = 0; j < len; i++, j++) ++ otp_read_conf_dw(glob_ctx, i, &buf[j]); ++ aspeed_otp_write(glob_ctx, 0, OTP_PROTECT_KEY); ++} ++EXPORT_SYMBOL(otp_read_conf_buf); ++ ++static long otp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) ++{ ++ struct miscdevice *c = file->private_data; ++ struct aspeed_otp *ctx = container_of(c, struct aspeed_otp, miscdev); ++ void __user *argp = (void __user *)arg; ++ struct otp_read xfer; ++ struct otp_prog prog; ++ u32 reg_read[2]; ++ int ret = 0; ++ ++ switch (cmd) { ++ case ASPEED_OTP_READ_DATA: ++ if (copy_from_user(&xfer, argp, sizeof(struct otp_read))) ++ return -EFAULT; ++ if ((xfer.offset + xfer.len) > 0x800) { ++ pr_err("out of range"); ++ return -EINVAL; ++ } ++ ++ aspeed_otp_write(ctx, OTP_PASSWD, OTP_PROTECT_KEY); ++ otp_read_data(ctx, xfer.offset, xfer.len); ++ aspeed_otp_write(ctx, 0, OTP_PROTECT_KEY); ++ ++ if (copy_to_user(xfer.data, ctx->data, xfer.len * 4)) ++ return -EFAULT; ++ if (copy_to_user(argp, &xfer, sizeof(struct otp_read))) ++ return -EFAULT; ++ break; ++ case ASPEED_OTP_READ_CONF: ++ if (copy_from_user(&xfer, argp, sizeof(struct otp_read))) ++ return -EFAULT; ++ if ((xfer.offset + xfer.len) > 0x800) { ++ pr_err("out of range"); ++ return -EINVAL; ++ } ++ ++ aspeed_otp_write(ctx, OTP_PASSWD, OTP_PROTECT_KEY); ++ otp_read_conf(ctx, xfer.offset, xfer.len); ++ aspeed_otp_write(ctx, 0, OTP_PROTECT_KEY); ++ ++ if (copy_to_user(xfer.data, ctx->data, xfer.len * 4)) ++ return -EFAULT; ++ if (copy_to_user(argp, &xfer, sizeof(struct otp_read))) ++ return -EFAULT; ++ break; ++ case ASPEED_OTP_PROG_DATA: ++ if (copy_from_user(&prog, argp, sizeof(struct otp_prog))) ++ return -EFAULT; ++ if (prog.bit_offset >= 32 || (prog.value != 0 && prog.value != 1)) { ++ pr_err("out of range"); ++ return -EINVAL; ++ } ++ if (prog.dw_offset >= 0x800) { ++ pr_err("out of range"); ++ return -EINVAL; ++ } ++ aspeed_otp_write(ctx, OTP_PASSWD, OTP_PROTECT_KEY); ++ ret = 
otp_prog_data(ctx, prog.value, prog.dw_offset, prog.bit_offset); ++ break; ++ case ASPEED_OTP_PROG_CONF: ++ if (copy_from_user(&prog, argp, sizeof(struct otp_prog))) ++ return -EFAULT; ++ if (prog.bit_offset >= 32 || (prog.value != 0 && prog.value != 1)) { ++ pr_err("out of range"); ++ return -EINVAL; ++ } ++ if (prog.dw_offset >= 0x20) { ++ pr_err("out of range"); ++ return -EINVAL; ++ } ++ aspeed_otp_write(ctx, OTP_PASSWD, OTP_PROTECT_KEY); ++ ret = otp_prog_conf(ctx, prog.value, prog.dw_offset, prog.bit_offset); ++ break; ++ case ASPEED_OTP_VER: ++ if (copy_to_user(argp, &ctx->otp_ver, sizeof(u32))) ++ return -EFAULT; ++ break; ++ case ASPEED_OTP_SW_RID: ++ reg_read[0] = aspeed_otp_read(ctx, SW_REV_ID0); ++ reg_read[1] = aspeed_otp_read(ctx, SW_REV_ID1); ++ if (copy_to_user(argp, reg_read, sizeof(u32) * 2)) ++ return -EFAULT; ++ break; ++ case ASPEED_SEC_KEY_NUM: ++ reg_read[0] = aspeed_otp_read(ctx, SEC_KEY_NUM) & 7; ++ if (copy_to_user(argp, reg_read, sizeof(u32))) ++ return -EFAULT; ++ break; ++ } ++ return ret; ++} ++ ++static int otp_open(struct inode *inode, struct file *file) ++{ ++ struct miscdevice *c = file->private_data; ++ struct aspeed_otp *ctx = container_of(c, struct aspeed_otp, miscdev); ++ ++ spin_lock(&otp_state_lock); ++ ++ if (ctx->is_open) { ++ spin_unlock(&otp_state_lock); ++ return -EBUSY; ++ } ++ ++ ctx->is_open = true; ++ ++ spin_unlock(&otp_state_lock); ++ ++ return 0; ++} ++ ++static int otp_release(struct inode *inode, struct file *file) ++{ ++ struct miscdevice *c = file->private_data; ++ struct aspeed_otp *ctx = container_of(c, struct aspeed_otp, miscdev); ++ ++ spin_lock(&otp_state_lock); ++ ++ ctx->is_open = false; ++ ++ spin_unlock(&otp_state_lock); ++ ++ return 0; ++} ++ ++static const struct file_operations otp_fops = { ++ .owner = THIS_MODULE, ++ .unlocked_ioctl = otp_ioctl, ++ .open = otp_open, ++ .release = otp_release, ++}; ++ ++static const struct of_device_id aspeed_otp_of_matches[] = { ++ { .compatible = "aspeed,ast2600-sbc" }, ++ { } ++}; ++MODULE_DEVICE_TABLE(of, aspeed_otp_of_matches); ++ ++static int aspeed_otp_probe(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ struct regmap *scu; ++ struct aspeed_otp *priv; ++ struct resource *res; ++ u32 revid0, revid1; ++ int rc; ++ ++ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); ++ glob_ctx = priv; ++ if (!priv) ++ return -ENOMEM; ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (!res) { ++ dev_err(&pdev->dev, "cannot get IORESOURCE_MEM\n"); ++ return -ENOENT; ++ } ++ ++ priv->reg_base = devm_ioremap_resource(&pdev->dev, res); ++ if (!priv->reg_base) ++ return -EIO; ++ ++ scu = syscon_regmap_lookup_by_phandle(dev->of_node, "aspeed,scu"); ++ if (IS_ERR(scu)) { ++ dev_err(dev, "failed to find 2600 SCU regmap\n"); ++ return PTR_ERR(scu); ++ } ++ ++ regmap_read(scu, ASPEED_REVISION_ID0, &revid0); ++ regmap_read(scu, ASPEED_REVISION_ID1, &revid1); ++ ++ priv->otp_ver = chip_version(revid0, revid1); ++ ++ if (priv->otp_ver == -1) { ++ dev_err(dev, "invalid SCU\n"); ++ return -EINVAL; ++ } ++ ++ priv->data = kmalloc(8192, GFP_KERNEL); ++ if (!priv->data) ++ return -ENOMEM; ++ ++ dev_set_drvdata(dev, priv); ++ ++ /* Set up the miscdevice */ ++ priv->miscdev.minor = MISC_DYNAMIC_MINOR; ++ priv->miscdev.name = "aspeed-otp"; ++ priv->miscdev.fops = &otp_fops; ++ ++ /* Register the device */ ++ rc = misc_register(&priv->miscdev); ++ if (rc) { ++ dev_err(dev, "Unable to register device\n"); ++ return rc; ++ } ++ ++ return 0; ++} ++ ++static void 
aspeed_otp_remove(struct platform_device *pdev) ++{ ++ struct aspeed_otp *ctx = dev_get_drvdata(&pdev->dev); ++ ++ kfree(ctx->data); ++ misc_deregister(&ctx->miscdev); ++} ++ ++static struct platform_driver aspeed_otp_driver = { ++ .probe = aspeed_otp_probe, ++ .remove = aspeed_otp_remove, ++ .driver = { ++ .name = KBUILD_MODNAME, ++ .of_match_table = aspeed_otp_of_matches, ++ }, ++}; ++ ++module_platform_driver(aspeed_otp_driver); ++ ++MODULE_AUTHOR("Neal Liu "); ++MODULE_LICENSE("GPL"); ++MODULE_DESCRIPTION("ASPEED OTP Driver"); +diff --git a/drivers/soc/aspeed/ast2700-espi.c b/drivers/soc/aspeed/ast2700-espi.c +--- a/drivers/soc/aspeed/ast2700-espi.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/soc/aspeed/ast2700-espi.c 2025-12-23 10:16:21.126032636 +0000 +@@ -0,0 +1,2304 @@ ++// SPDX-License-Identifier: GPL-2.0+ ++/* ++ * Copyright 2023 Aspeed Technology Inc. ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "ast2700-espi.h" ++ ++#define DEVICE_NAME "aspeed-espi" ++ ++static DEFINE_IDA(ast2700_espi_ida); ++ ++#define PERIF_MCYC_ALIGN SZ_64K ++#define PERIF_MMBI_ALIGN SZ_64M ++#define PERIF_MMBI_MAX_INST 8 ++ ++#define OOB_DMA_RPTR_KEY 0x4f4f4253 ++#define OOB_DMA_DESC_NUM 8 ++#define OOB_DMA_DESC_CUSTOM 0x4 ++ ++#define FLASH_EDAF_ALIGN SZ_16M ++ ++struct ast2700_espi_perif_mmbi { ++ void *b2h_virt; ++ void *h2b_virt; ++ dma_addr_t b2h_addr; ++ dma_addr_t h2b_addr; ++ struct miscdevice b2h_mdev; ++ struct miscdevice h2b_mdev; ++ bool host_rwp_update; ++ wait_queue_head_t wq; ++ struct ast2700_espi_perif *perif; ++}; ++ ++struct ast2700_espi_perif { ++ struct { ++ bool enable; ++ int irq; ++ void *virt; ++ dma_addr_t taddr; ++ uint64_t saddr; ++ uint64_t size; ++ uint32_t inst_num; ++ uint32_t inst_size; ++ struct ast2700_espi_perif_mmbi inst[PERIF_MMBI_MAX_INST]; ++ } mmbi; ++ ++ struct { ++ bool enable; ++ void *virt; ++ dma_addr_t taddr; ++ uint64_t saddr; ++ uint64_t size; ++ } mcyc; ++ ++ struct { ++ bool enable; ++ void *np_tx_virt; ++ dma_addr_t np_tx_addr; ++ void *pc_tx_virt; ++ dma_addr_t pc_tx_addr; ++ void *pc_rx_virt; ++ dma_addr_t pc_rx_addr; ++ } dma; ++ ++ bool rtc_enable; ++ bool rx_ready; ++ wait_queue_head_t wq; ++ ++ spinlock_t lock; ++ struct mutex np_tx_mtx; ++ struct mutex pc_tx_mtx; ++ struct mutex pc_rx_mtx; ++ ++ struct miscdevice mdev; ++}; ++ ++struct ast2700_espi_vw { ++ struct { ++ bool hw_mode; ++ uint32_t grp; ++ uint32_t dir0; ++ uint32_t dir1; ++ uint32_t val0; ++ uint32_t val1; ++ } gpio; ++ ++ struct miscdevice mdev; ++}; ++ ++struct ast2700_espi_oob_dma_tx_desc { ++ uint32_t data_addrl; ++ uint32_t data_addrh; ++ uint8_t cyc; ++ uint16_t tag : 4; ++ uint16_t len : 12; ++ uint8_t msg_type : 3; ++ uint8_t raz0 : 1; ++ uint8_t pec : 1; ++ uint8_t int_en : 1; ++ uint8_t pause : 1; ++ uint8_t raz1 : 1; ++ uint32_t raz2; ++ uint32_t raz3; ++ uint32_t pad[3]; ++} __packed; ++ ++struct ast2700_espi_oob_dma_rx_desc { ++ uint32_t data_addrl; ++ uint32_t data_addrh; ++ uint8_t cyc; ++ uint16_t tag : 4; ++ uint16_t len : 12; ++ uint8_t raz : 7; ++ uint8_t dirty : 1; ++ uint32_t pad[1]; ++} __packed; ++ ++struct ast2700_espi_oob { ++ struct { ++ bool enable; ++ struct ast2700_espi_oob_dma_tx_desc *txd_virt; ++ dma_addr_t txd_addr; ++ struct ast2700_espi_oob_dma_rx_desc *rxd_virt; ++ dma_addr_t rxd_addr; ++ void *tx_virt; ++ dma_addr_t tx_addr; ++ void *rx_virt; ++ 
dma_addr_t rx_addr; ++ } dma; ++ ++ bool rx_ready; ++ wait_queue_head_t wq; ++ ++ spinlock_t lock; ++ struct mutex tx_mtx; ++ struct mutex rx_mtx; ++ ++ struct miscdevice mdev; ++}; ++ ++struct ast2700_espi_flash { ++ struct { ++ uint32_t mode; ++ phys_addr_t taddr; ++ uint64_t size; ++ } edaf; ++ ++ struct { ++ bool enable; ++ void *tx_virt; ++ dma_addr_t tx_addr; ++ void *rx_virt; ++ dma_addr_t rx_addr; ++ } dma; ++ ++ bool rx_ready; ++ wait_queue_head_t wq; ++ ++ spinlock_t lock; ++ struct mutex rx_mtx; ++ struct mutex tx_mtx; ++ ++ struct miscdevice mdev; ++}; ++ ++struct ast2700_espi { ++ struct device *dev; ++ void __iomem *regs; ++ struct clk *clk; ++ int dev_id; ++ int irq; ++ ++ struct ast2700_espi_perif perif; ++ struct ast2700_espi_vw vw; ++ struct ast2700_espi_oob oob; ++ struct ast2700_espi_flash flash; ++}; ++ ++/* peripheral channel (CH0) */ ++static int ast2700_espi_mmbi_b2h_mmap(struct file *fp, struct vm_area_struct *vma) ++{ ++ struct ast2700_espi_perif_mmbi *mmbi; ++ struct ast2700_espi_perif *perif; ++ struct ast2700_espi *espi; ++ unsigned long vm_size; ++ pgprot_t prot; ++ ++ mmbi = container_of(fp->private_data, struct ast2700_espi_perif_mmbi, b2h_mdev); ++ ++ perif = mmbi->perif; ++ ++ espi = container_of(perif, struct ast2700_espi, perif); ++ ++ vm_size = vma->vm_end - vma->vm_start; ++ prot = vma->vm_page_prot; ++ ++ if (((vma->vm_pgoff << PAGE_SHIFT) + vm_size) > (perif->mmbi.inst_size >> 1)) ++ return -EINVAL; ++ ++ prot = pgprot_noncached(prot); ++ ++ if (remap_pfn_range(vma, vma->vm_start, ++ (mmbi->b2h_addr >> PAGE_SHIFT) + vma->vm_pgoff, ++ vm_size, prot)) ++ return -EAGAIN; ++ ++ return 0; ++} ++ ++static int ast2700_espi_mmbi_h2b_mmap(struct file *fp, struct vm_area_struct *vma) ++{ ++ struct ast2700_espi_perif_mmbi *mmbi; ++ struct ast2700_espi_perif *perif; ++ struct ast2700_espi *espi; ++ unsigned long vm_size; ++ pgprot_t prot; ++ ++ mmbi = container_of(fp->private_data, struct ast2700_espi_perif_mmbi, h2b_mdev); ++ ++ perif = mmbi->perif; ++ ++ espi = container_of(perif, struct ast2700_espi, perif); ++ ++ vm_size = vma->vm_end - vma->vm_start; ++ prot = vma->vm_page_prot; ++ ++ if (((vma->vm_pgoff << PAGE_SHIFT) + vm_size) > (perif->mmbi.inst_size >> 1)) ++ return -EINVAL; ++ ++ prot = pgprot_noncached(prot); ++ ++ if (remap_pfn_range(vma, vma->vm_start, ++ (mmbi->h2b_addr >> PAGE_SHIFT) + vma->vm_pgoff, ++ vm_size, prot)) ++ return -EAGAIN; ++ ++ return 0; ++} ++ ++static __poll_t ast2700_espi_mmbi_h2b_poll(struct file *fp, struct poll_table_struct *pt) ++{ ++ struct ast2700_espi_perif_mmbi *mmbi; ++ ++ mmbi = container_of(fp->private_data, struct ast2700_espi_perif_mmbi, h2b_mdev); ++ ++ poll_wait(fp, &mmbi->wq, pt); ++ ++ if (!mmbi->host_rwp_update) ++ return 0; ++ ++ mmbi->host_rwp_update = false; ++ ++ return EPOLLIN; ++} ++ ++static long ast2700_espi_perif_pc_get_rx(struct file *fp, ++ struct ast2700_espi_perif *perif, ++ struct aspeed_espi_ioc *ioc) ++{ ++ uint32_t reg, cyc, tag, len; ++ struct ast2700_espi *espi; ++ struct espi_comm_hdr *hdr; ++ unsigned long flags; ++ uint32_t pkt_len; ++ uint8_t *pkt; ++ int i, rc; ++ ++ espi = container_of(perif, struct ast2700_espi, perif); ++ ++ if (fp->f_flags & O_NONBLOCK) { ++ if (!mutex_trylock(&perif->pc_rx_mtx)) ++ return -EAGAIN; ++ ++ if (!perif->rx_ready) { ++ rc = -ENODATA; ++ goto unlock_mtx_n_out; ++ } ++ } else { ++ mutex_lock(&perif->pc_rx_mtx); ++ ++ if (!perif->rx_ready) { ++ rc = wait_event_interruptible(perif->wq, perif->rx_ready); ++ if (rc == -ERESTARTSYS) { ++ rc = -EINTR; ++ goto 
unlock_mtx_n_out; ++ } ++ } ++ } ++ ++ /* ++ * common header (i.e. cycle type, tag, and length) ++ * part is written to HW registers ++ */ ++ reg = readl(espi->regs + ESPI_CH0_PC_RX_CTRL); ++ cyc = FIELD_GET(ESPI_CH0_PC_RX_CTRL_CYC, reg); ++ tag = FIELD_GET(ESPI_CH0_PC_RX_CTRL_TAG, reg); ++ len = FIELD_GET(ESPI_CH0_PC_RX_CTRL_LEN, reg); ++ ++ /* ++ * calculate the length of the rest part of the ++ * eSPI packet to be read from HW and copied to ++ * user space. ++ */ ++ switch (cyc) { ++ case ESPI_PERIF_MSG: ++ pkt_len = sizeof(struct espi_perif_msg); ++ break; ++ case ESPI_PERIF_MSG_D: ++ pkt_len = ((len) ? len : ESPI_MAX_PLD_LEN) + ++ sizeof(struct espi_perif_msg); ++ break; ++ case ESPI_PERIF_SUC_CMPLT_D_MIDDLE: ++ case ESPI_PERIF_SUC_CMPLT_D_FIRST: ++ case ESPI_PERIF_SUC_CMPLT_D_LAST: ++ case ESPI_PERIF_SUC_CMPLT_D_ONLY: ++ pkt_len = ((len) ? len : ESPI_MAX_PLD_LEN) + ++ sizeof(struct espi_perif_cmplt); ++ break; ++ case ESPI_PERIF_SUC_CMPLT: ++ case ESPI_PERIF_UNSUC_CMPLT: ++ pkt_len = sizeof(struct espi_perif_cmplt); ++ break; ++ default: ++ rc = -EFAULT; ++ goto unlock_mtx_n_out; ++ } ++ ++ if (ioc->pkt_len < pkt_len) { ++ rc = -EINVAL; ++ goto unlock_mtx_n_out; ++ } ++ ++ pkt = vmalloc(pkt_len); ++ if (!pkt) { ++ rc = -ENOMEM; ++ goto unlock_mtx_n_out; ++ } ++ ++ hdr = (struct espi_comm_hdr *)pkt; ++ hdr->cyc = cyc; ++ hdr->tag = tag; ++ hdr->len_h = len >> 8; ++ hdr->len_l = len & 0xff; ++ ++ if (perif->dma.enable) { ++ memcpy(hdr + 1, perif->dma.pc_rx_virt, pkt_len - sizeof(*hdr)); ++ } else { ++ for (i = sizeof(*hdr); i < pkt_len; ++i) ++ reg = readl(espi->regs + ESPI_CH0_PC_RX_DATA) & 0xff; ++ } ++ ++ if (copy_to_user((void __user *)ioc->pkt, pkt, pkt_len)) { ++ rc = -EFAULT; ++ goto free_n_out; ++ } ++ ++ spin_lock_irqsave(&perif->lock, flags); ++ ++ writel(ESPI_CH0_PC_RX_CTRL_SERV_PEND, espi->regs + ESPI_CH0_PC_RX_CTRL); ++ perif->rx_ready = 0; ++ ++ spin_unlock_irqrestore(&perif->lock, flags); ++ ++ rc = 0; ++ ++free_n_out: ++ vfree(pkt); ++ ++unlock_mtx_n_out: ++ mutex_unlock(&perif->pc_rx_mtx); ++ ++ return rc; ++} ++ ++static long ast2700_espi_perif_pc_put_tx(struct file *fp, ++ struct ast2700_espi_perif *perif, ++ struct aspeed_espi_ioc *ioc) ++{ ++ uint32_t reg, cyc, tag, len; ++ struct ast2700_espi *espi; ++ struct espi_comm_hdr *hdr; ++ uint8_t *pkt; ++ int i, rc; ++ ++ espi = container_of(perif, struct ast2700_espi, perif); ++ ++ if (!mutex_trylock(&perif->pc_tx_mtx)) ++ return -EAGAIN; ++ ++ reg = readl(espi->regs + ESPI_CH0_PC_TX_CTRL); ++ if (reg & ESPI_CH0_PC_TX_CTRL_TRIG_PEND) { ++ rc = -EBUSY; ++ goto unlock_n_out; ++ } ++ ++ pkt = vmalloc(ioc->pkt_len); ++ if (!pkt) { ++ rc = -ENOMEM; ++ goto unlock_n_out; ++ } ++ ++ hdr = (struct espi_comm_hdr *)pkt; ++ ++ if (copy_from_user(pkt, (void __user *)ioc->pkt, ioc->pkt_len)) { ++ rc = -EFAULT; ++ goto free_n_out; ++ } ++ ++ /* ++ * common header (i.e. 
cycle type, tag, and length) ++ * part is written to HW registers ++ */ ++ if (perif->dma.enable) { ++ memcpy(perif->dma.pc_tx_virt, hdr + 1, ioc->pkt_len - sizeof(*hdr)); ++ dma_wmb(); ++ } else { ++ for (i = sizeof(*hdr); i < ioc->pkt_len; ++i) ++ writel(pkt[i], espi->regs + ESPI_CH0_PC_TX_DATA); ++ } ++ ++ cyc = hdr->cyc; ++ tag = hdr->tag; ++ len = (hdr->len_h << 8) | (hdr->len_l & 0xff); ++ ++ reg = FIELD_PREP(ESPI_CH0_PC_TX_CTRL_CYC, cyc) ++ | FIELD_PREP(ESPI_CH0_PC_TX_CTRL_TAG, tag) ++ | FIELD_PREP(ESPI_CH0_PC_TX_CTRL_LEN, len) ++ | ESPI_CH0_PC_TX_CTRL_TRIG_PEND; ++ writel(reg, espi->regs + ESPI_CH0_PC_TX_CTRL); ++ ++ rc = 0; ++ ++free_n_out: ++ vfree(pkt); ++ ++unlock_n_out: ++ mutex_unlock(&perif->pc_tx_mtx); ++ ++ return rc; ++} ++ ++static long ast2700_espi_perif_np_put_tx(struct file *fp, ++ struct ast2700_espi_perif *perif, ++ struct aspeed_espi_ioc *ioc) ++{ ++ uint32_t reg, cyc, tag, len; ++ struct ast2700_espi *espi; ++ struct espi_comm_hdr *hdr; ++ uint8_t *pkt; ++ int i, rc; ++ ++ espi = container_of(perif, struct ast2700_espi, perif); ++ ++ if (!mutex_trylock(&perif->np_tx_mtx)) ++ return -EAGAIN; ++ ++ reg = readl(espi->regs + ESPI_CH0_NP_TX_CTRL); ++ if (reg & ESPI_CH0_NP_TX_CTRL_TRIG_PEND) { ++ rc = -EBUSY; ++ goto unlock_n_out; ++ } ++ ++ pkt = vmalloc(ioc->pkt_len); ++ if (!pkt) { ++ rc = -ENOMEM; ++ goto unlock_n_out; ++ } ++ ++ hdr = (struct espi_comm_hdr *)pkt; ++ ++ if (copy_from_user(pkt, (void __user *)ioc->pkt, ioc->pkt_len)) { ++ rc = -EFAULT; ++ goto free_n_out; ++ } ++ ++ /* ++ * common header (i.e. cycle type, tag, and length) ++ * part is written to HW registers ++ */ ++ if (perif->dma.enable) { ++ memcpy(perif->dma.np_tx_virt, hdr + 1, ioc->pkt_len - sizeof(*hdr)); ++ dma_wmb(); ++ } else { ++ for (i = sizeof(*hdr); i < ioc->pkt_len; ++i) ++ writel(pkt[i], espi->regs + ESPI_CH0_NP_TX_DATA); ++ } ++ ++ cyc = hdr->cyc; ++ tag = hdr->tag; ++ len = (hdr->len_h << 8) | (hdr->len_l & 0xff); ++ ++ reg = FIELD_PREP(ESPI_CH0_NP_TX_CTRL_CYC, cyc) ++ | FIELD_PREP(ESPI_CH0_NP_TX_CTRL_TAG, tag) ++ | FIELD_PREP(ESPI_CH0_NP_TX_CTRL_LEN, len) ++ | ESPI_CH0_NP_TX_CTRL_TRIG_PEND; ++ writel(reg, espi->regs + ESPI_CH0_NP_TX_CTRL); ++ ++ rc = 0; ++ ++free_n_out: ++ vfree(pkt); ++ ++unlock_n_out: ++ mutex_unlock(&perif->np_tx_mtx); ++ ++ return rc; ++} ++ ++static long ast2700_espi_perif_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) ++{ ++ struct ast2700_espi_perif *perif; ++ struct aspeed_espi_ioc ioc; ++ ++ perif = container_of(fp->private_data, struct ast2700_espi_perif, mdev); ++ ++ if (copy_from_user(&ioc, (void __user *)arg, sizeof(ioc))) ++ return -EFAULT; ++ ++ if (ioc.pkt_len > ESPI_MAX_PKT_LEN) ++ return -EINVAL; ++ ++ switch (cmd) { ++ case ASPEED_ESPI_PERIF_PC_GET_RX: ++ return ast2700_espi_perif_pc_get_rx(fp, perif, &ioc); ++ case ASPEED_ESPI_PERIF_PC_PUT_TX: ++ return ast2700_espi_perif_pc_put_tx(fp, perif, &ioc); ++ case ASPEED_ESPI_PERIF_NP_PUT_TX: ++ return ast2700_espi_perif_np_put_tx(fp, perif, &ioc); ++ default: ++ break; ++ }; ++ ++ return -EINVAL; ++} ++ ++static int ast2700_espi_perif_mmap(struct file *fp, struct vm_area_struct *vma) ++{ ++ struct ast2700_espi_perif *perif; ++ unsigned long vm_size; ++ pgprot_t vm_prot; ++ ++ perif = container_of(fp->private_data, struct ast2700_espi_perif, mdev); ++ if (!perif->mcyc.enable) ++ return -EPERM; ++ ++ vm_size = vma->vm_end - vma->vm_start; ++ vm_prot = vma->vm_page_prot; ++ ++ if (((vma->vm_pgoff << PAGE_SHIFT) + vm_size) > perif->mcyc.size) ++ return -EINVAL; ++ ++ vm_prot = 
pgprot_noncached(vm_prot); ++ ++ if (remap_pfn_range(vma, vma->vm_start, ++ (perif->mcyc.taddr >> PAGE_SHIFT) + vma->vm_pgoff, ++ vm_size, vm_prot)) ++ return -EAGAIN; ++ ++ return 0; ++} ++ ++static const struct file_operations ast2700_espi_mmbi_b2h_fops = { ++ .owner = THIS_MODULE, ++ .mmap = ast2700_espi_mmbi_b2h_mmap, ++}; ++ ++static const struct file_operations ast2700_espi_mmbi_h2b_fops = { ++ .owner = THIS_MODULE, ++ .mmap = ast2700_espi_mmbi_h2b_mmap, ++ .poll = ast2700_espi_mmbi_h2b_poll, ++}; ++ ++static const struct file_operations ast2700_espi_perif_fops = { ++ .owner = THIS_MODULE, ++ .mmap = ast2700_espi_perif_mmap, ++ .unlocked_ioctl = ast2700_espi_perif_ioctl, ++}; ++ ++static irqreturn_t ast2700_espi_perif_mmbi_isr(int irq, void *arg) ++{ ++ struct ast2700_espi_perif_mmbi *mmbi; ++ struct ast2700_espi_perif *perif; ++ struct ast2700_espi *espi; ++ uint32_t sts, tmp; ++ uint32_t *p; ++ int i; ++ ++ espi = (struct ast2700_espi *)arg; ++ ++ perif = &espi->perif; ++ ++ sts = readl(espi->regs + ESPI_MMBI_INT_STS); ++ if (!sts) ++ return IRQ_NONE; ++ ++ for (i = 0, tmp = sts; i < perif->mmbi.inst_num; ++i, tmp >>= 2) { ++ if (!(tmp & 0x3)) ++ continue; ++ ++ mmbi = &perif->mmbi.inst[i]; ++ ++ p = (uint32_t *)mmbi->h2b_virt; ++ p[0] = readl(espi->regs + ESPI_MMBI_HOST_RWP(i)); ++ p[1] = readl(espi->regs + ESPI_MMBI_HOST_RWP(i) + 4); ++ ++ mmbi->host_rwp_update = true; ++ ++ wake_up_interruptible(&mmbi->wq); ++ } ++ ++ writel(sts, espi->regs + ESPI_MMBI_INT_STS); ++ ++ return IRQ_HANDLED; ++} ++ ++static void ast2700_espi_perif_isr(struct ast2700_espi *espi) ++{ ++ struct ast2700_espi_perif *perif; ++ unsigned long flags; ++ uint32_t sts; ++ ++ perif = &espi->perif; ++ ++ sts = readl(espi->regs + ESPI_CH0_INT_STS); ++ ++ if (sts & ESPI_CH0_INT_STS_PC_RX_CMPLT) { ++ writel(ESPI_CH0_INT_STS_PC_RX_CMPLT, espi->regs + ESPI_CH0_INT_STS); ++ ++ spin_lock_irqsave(&perif->lock, flags); ++ perif->rx_ready = true; ++ spin_unlock_irqrestore(&perif->lock, flags); ++ ++ wake_up_interruptible(&perif->wq); ++ } ++} ++ ++static void ast2700_espi_perif_sw_reset(struct ast2700_espi *espi) ++{ ++ struct device *dev; ++ uint32_t reg; ++ ++ dev = espi->dev; ++ ++ reg = readl(espi->regs + ESPI_CH0_CTRL); ++ reg &= ~(ESPI_CH0_CTRL_NP_TX_RST ++ | ESPI_CH0_CTRL_NP_RX_RST ++ | ESPI_CH0_CTRL_PC_TX_RST ++ | ESPI_CH0_CTRL_PC_RX_RST ++ | ESPI_CH0_CTRL_NP_TX_DMA_EN ++ | ESPI_CH0_CTRL_PC_TX_DMA_EN ++ | ESPI_CH0_CTRL_PC_RX_DMA_EN ++ | ESPI_CH0_CTRL_SW_RDY); ++ writel(reg, espi->regs + ESPI_CH0_CTRL); ++ ++ udelay(1); ++ ++ reg |= (ESPI_CH0_CTRL_NP_TX_RST ++ | ESPI_CH0_CTRL_NP_RX_RST ++ | ESPI_CH0_CTRL_PC_TX_RST ++ | ESPI_CH0_CTRL_PC_RX_RST); ++ writel(reg, espi->regs + ESPI_CH0_CTRL); ++} ++ ++static void ast2700_espi_perif_reset(struct ast2700_espi *espi) ++{ ++ struct ast2700_espi_perif *perif; ++ struct device *dev; ++ uint64_t mask; ++ uint32_t reg; ++ ++ dev = espi->dev; ++ ++ perif = &espi->perif; ++ ++ writel(0x0, espi->regs + ESPI_CH0_INT_EN); ++ writel(0xffffffff, espi->regs + ESPI_CH0_INT_STS); ++ ++ writel(0x0, espi->regs + ESPI_MMBI_INT_EN); ++ writel(0xffffffff, espi->regs + ESPI_MMBI_INT_STS); ++ ++ reg = readl(espi->regs + ESPI_CH0_CTRL); ++ reg &= ~(ESPI_CH0_CTRL_MCYC_RD_DIS_WDT | ESPI_CH0_CTRL_MCYC_WR_DIS_WDT); ++ writel(reg, espi->regs + ESPI_CH0_CTRL); ++ ++ reg = readl(espi->regs + ESPI_CH0_MCYC0_MASKL); ++ reg &= ~ESPI_CH0_MCYC0_MASKL_EN; ++ writel(reg, espi->regs + ESPI_CH0_MCYC0_MASKL); ++ ++ reg = readl(espi->regs + ESPI_CH0_MCYC1_MASKL); ++ reg &= ~ESPI_CH0_MCYC1_MASKL_EN; ++ writel(reg, 
espi->regs + ESPI_CH0_MCYC1_MASKL); ++ ++ reg = readl(espi->regs + ESPI_CH0_CTRL); ++ reg |= (ESPI_CH0_CTRL_MCYC_RD_DIS | ESPI_CH0_CTRL_MCYC_WR_DIS); ++ reg &= ~(ESPI_CH0_CTRL_NP_TX_DMA_EN ++ | ESPI_CH0_CTRL_PC_TX_DMA_EN ++ | ESPI_CH0_CTRL_PC_RX_DMA_EN ++ | ESPI_CH0_CTRL_SW_RDY); ++ writel(reg, espi->regs + ESPI_CH0_CTRL); ++ ++ if (perif->mmbi.enable) { ++ reg = readl(espi->regs + ESPI_MMBI_CTRL); ++ reg &= ~ESPI_MMBI_CTRL_EN; ++ writel(reg, espi->regs + ESPI_MMBI_CTRL); ++ ++ mask = ~(perif->mmbi.size - 1); ++ writel(mask >> 32, espi->regs + ESPI_CH0_MCYC0_MASKH); ++ writel(mask & 0xffffffff, espi->regs + ESPI_CH0_MCYC0_MASKL); ++ writel((perif->mmbi.saddr >> 32), espi->regs + ESPI_CH0_MCYC0_SADDRH); ++ writel((perif->mmbi.saddr & 0xffffffff), espi->regs + ESPI_CH0_MCYC0_SADDRL); ++ writel((perif->mmbi.taddr >> 32), espi->regs + ESPI_CH0_MCYC0_TADDRH); ++ writel((perif->mmbi.taddr & 0xffffffff), espi->regs + ESPI_CH0_MCYC0_TADDRL); ++ ++ writel((0x1 << (perif->mmbi.inst_num * 2)) - 1, espi->regs + ESPI_MMBI_INT_EN); ++ ++ reg = FIELD_PREP(ESPI_MMBI_CTRL_INST_NUM, count_trailing_zeros(perif->mmbi.inst_num)) ++ | ESPI_MMBI_CTRL_EN; ++ writel(reg, espi->regs + ESPI_MMBI_CTRL); ++ ++ reg = readl(espi->regs + ESPI_CH0_MCYC0_MASKL) | ESPI_CH0_MCYC0_MASKL_EN; ++ writel(reg, espi->regs + ESPI_CH0_MCYC0_MASKL); ++ ++ reg = readl(espi->regs + ESPI_CH0_CTRL); ++ reg &= ~(ESPI_CH0_CTRL_MCYC_RD_DIS | ESPI_CH0_CTRL_MCYC_WR_DIS); ++ writel(reg, espi->regs + ESPI_CH0_CTRL); ++ } ++ ++ if (perif->mcyc.enable) { ++ mask = ~(perif->mcyc.size - 1); ++ writel(mask >> 32, espi->regs + ESPI_CH0_MCYC1_MASKH); ++ writel(mask & 0xffffffff, espi->regs + ESPI_CH0_MCYC1_MASKL); ++ writel((perif->mcyc.saddr >> 32), espi->regs + ESPI_CH0_MCYC1_SADDRH); ++ writel((perif->mcyc.saddr & 0xffffffff), espi->regs + ESPI_CH0_MCYC1_SADDRL); ++ writel((perif->mcyc.taddr >> 32), espi->regs + ESPI_CH0_MCYC1_TADDRH); ++ writel((perif->mcyc.taddr & 0xffffffff), espi->regs + ESPI_CH0_MCYC1_TADDRL); ++ ++ reg = readl(espi->regs + ESPI_CH0_MCYC1_MASKL) | ESPI_CH0_MCYC1_MASKL_EN; ++ writel(reg, espi->regs + ESPI_CH0_MCYC1_MASKL); ++ ++ reg = readl(espi->regs + ESPI_CH0_CTRL); ++ reg &= ~(ESPI_CH0_CTRL_MCYC_RD_DIS | ESPI_CH0_CTRL_MCYC_WR_DIS); ++ writel(reg, espi->regs + ESPI_CH0_CTRL); ++ } ++ ++ if (perif->dma.enable) { ++ writel((perif->dma.np_tx_addr >> 32), espi->regs + ESPI_CH0_NP_TX_DMAH); ++ writel((perif->dma.np_tx_addr & 0xffffffff), espi->regs + ESPI_CH0_NP_TX_DMAL); ++ writel((perif->dma.pc_tx_addr >> 32), espi->regs + ESPI_CH0_PC_TX_DMAH); ++ writel((perif->dma.pc_tx_addr & 0xffffffff), espi->regs + ESPI_CH0_PC_TX_DMAL); ++ writel((perif->dma.pc_rx_addr >> 32), espi->regs + ESPI_CH0_PC_RX_DMAH); ++ writel((perif->dma.pc_rx_addr & 0xffffffff), espi->regs + ESPI_CH0_PC_RX_DMAL); ++ ++ reg = readl(espi->regs + ESPI_CH0_CTRL) ++ | ESPI_CH0_CTRL_NP_TX_DMA_EN ++ | ESPI_CH0_CTRL_PC_TX_DMA_EN ++ | ESPI_CH0_CTRL_PC_RX_DMA_EN; ++ writel(reg, espi->regs + ESPI_CH0_CTRL); ++ } ++ if (perif->rtc_enable) { ++ reg = readl(espi->regs + ESPI_CAP_GEN) ++ | ESPI_CAP_GEN_RTC_SUP; ++ writel(reg, espi->regs + ESPI_CAP_GEN); ++ } ++ ++ writel(ESPI_CH0_INT_EN_PC_RX_CMPLT, espi->regs + ESPI_CH0_INT_EN); ++ ++ reg = readl(espi->regs + ESPI_CH0_CTRL) | ESPI_CH0_CTRL_SW_RDY; ++ writel(reg, espi->regs + ESPI_CH0_CTRL); ++} ++ ++static int ast2700_espi_perif_probe(struct ast2700_espi *espi) ++{ ++ struct ast2700_espi_perif_mmbi *mmbi; ++ struct ast2700_espi_perif *perif; ++ struct platform_device *pdev; ++ struct device_node *np; ++ struct resource res; 
++ struct device *dev; ++ int i, rc; ++ ++ dev = espi->dev; ++ ++ perif = &espi->perif; ++ ++ init_waitqueue_head(&perif->wq); ++ ++ spin_lock_init(&perif->lock); ++ ++ mutex_init(&perif->np_tx_mtx); ++ mutex_init(&perif->pc_tx_mtx); ++ mutex_init(&perif->pc_rx_mtx); ++ ++ perif->mmbi.enable = of_property_read_bool(dev->of_node, "perif-mmbi-enable"); ++ if (perif->mmbi.enable) { ++ pdev = container_of(dev, struct platform_device, dev); ++ ++ perif->mmbi.irq = platform_get_irq(pdev, 1); ++ if (perif->mmbi.irq < 0) { ++ dev_err(dev, "cannot get MMBI IRQ number\n"); ++ return -ENODEV; ++ } ++ ++ rc = of_property_read_u64(dev->of_node, "perif-mmbi-src-addr", &perif->mmbi.saddr); ++ if (rc || !IS_ALIGNED(perif->mmbi.saddr, PERIF_MMBI_ALIGN)) { ++ dev_err(dev, "cannot get 64MB-aligned MMBI host address\n"); ++ return -ENODEV; ++ } ++ ++ rc = of_property_read_u32(dev->of_node, "perif-mmbi-instance-num", &perif->mmbi.inst_num); ++ if (rc || ++ perif->mmbi.inst_num == 0 || ++ perif->mmbi.inst_num > PERIF_MMBI_MAX_INST || ++ (perif->mmbi.inst_num & (perif->mmbi.inst_num - 1))) { ++ dev_err(dev, "cannot get valid MMBI instance number, expect 1/2/4/8\n"); ++ return -EINVAL; ++ } ++ ++ np = of_parse_phandle(dev->of_node, "perif-mmbi-tgt-memory", 0); ++ if (!np || of_address_to_resource(np, 0, &res)) { ++ dev_err(dev, "cannot get MMBI memory region\n"); ++ return -ENODEV; ++ } ++ ++ of_node_put(np); ++ ++ perif->mmbi.taddr = res.start; ++ perif->mmbi.size = resource_size(&res); ++ perif->mmbi.inst_size = perif->mmbi.size / perif->mmbi.inst_num; ++ if (!IS_ALIGNED(perif->mmbi.taddr, PERIF_MMBI_ALIGN) || ++ !IS_ALIGNED(perif->mmbi.size, PERIF_MMBI_ALIGN)) { ++ dev_err(dev, "cannot get 64MB-aligned MMBI address/size\n"); ++ return -EINVAL; ++ } ++ ++ perif->mmbi.virt = devm_ioremap_resource(dev, &res); ++ if (!perif->mmbi.virt) { ++ dev_err(dev, "cannot map MMBI memory region\n"); ++ return -ENOMEM; ++ } ++ ++ memset_io(perif->mmbi.virt, 0, perif->mmbi.size); ++ ++ for (i = 0; i < perif->mmbi.inst_num; ++i) { ++ mmbi = &perif->mmbi.inst[i]; ++ ++ init_waitqueue_head(&mmbi->wq); ++ ++ mmbi->perif = perif; ++ mmbi->host_rwp_update = false; ++ ++ mmbi->b2h_virt = perif->mmbi.virt + ((perif->mmbi.inst_size >> 1) * i); ++ mmbi->b2h_addr = perif->mmbi.taddr + ((perif->mmbi.inst_size >> 1) * i); ++ mmbi->b2h_mdev.parent = dev; ++ mmbi->b2h_mdev.minor = MISC_DYNAMIC_MINOR; ++ mmbi->b2h_mdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s-mmbi%d-b2h%d", ++ DEVICE_NAME, espi->dev_id, i); ++ mmbi->b2h_mdev.fops = &ast2700_espi_mmbi_b2h_fops; ++ rc = misc_register(&mmbi->b2h_mdev); ++ if (rc) { ++ dev_err(dev, "cannot register device %s\n", mmbi->b2h_mdev.name); ++ return rc; ++ } ++ ++ mmbi->h2b_virt = perif->mmbi.virt + ((perif->mmbi.inst_size >> 1) * (i + perif->mmbi.inst_num)); ++ mmbi->h2b_addr = perif->mmbi.taddr + ((perif->mmbi.inst_size >> 1) * (i + perif->mmbi.inst_num)); ++ mmbi->h2b_mdev.parent = dev; ++ mmbi->h2b_mdev.minor = MISC_DYNAMIC_MINOR; ++ mmbi->h2b_mdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s-mmbi%d-h2b%d", ++ DEVICE_NAME, espi->dev_id, i); ++ mmbi->h2b_mdev.fops = &ast2700_espi_mmbi_h2b_fops; ++ rc = misc_register(&mmbi->h2b_mdev); ++ if (rc) { ++ dev_err(dev, "cannot register device %s\n", mmbi->h2b_mdev.name); ++ return rc; ++ } ++ } ++ } ++ ++ perif->mcyc.enable = of_property_read_bool(dev->of_node, "perif-mcyc-enable"); ++ if (perif->mcyc.enable) { ++ rc = of_property_read_u64(dev->of_node, "perif-mcyc-src-addr", &perif->mcyc.saddr); ++ if (rc || !IS_ALIGNED(perif->mcyc.saddr, 
PERIF_MCYC_ALIGN)) { ++ dev_err(dev, "cannot get 64KB-aligned memory cycle host address\n"); ++ return -ENODEV; ++ } ++ ++ rc = of_property_read_u64(dev->of_node, "perif-mcyc-size", &perif->mcyc.size); ++ if (rc || !IS_ALIGNED(perif->mcyc.size, PERIF_MCYC_ALIGN)) { ++ dev_err(dev, "cannot get 64KB-aligned memory cycle size\n"); ++ return -EINVAL; ++ } ++ ++ np = of_parse_phandle(dev->of_node, "memory-region", 0); ++ if (np) { ++ of_reserved_mem_device_init(dev); ++ rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); ++ if (rc) { ++ dev_err(dev, "Failed to mask DMA.\n"); ++ return -ENODEV; ++ } ++ } ++ ++ perif->mcyc.virt = dmam_alloc_coherent(dev, perif->mcyc.size, ++ &perif->mcyc.taddr, GFP_KERNEL); ++ if (!perif->mcyc.virt) { ++ dev_err(dev, "cannot allocate memory cycle\n"); ++ return -ENOMEM; ++ } ++ } ++ ++ perif->dma.enable = of_property_read_bool(dev->of_node, "perif-dma-mode"); ++ if (perif->dma.enable) { ++ perif->dma.pc_tx_virt = dmam_alloc_coherent(dev, PAGE_SIZE, ++ &perif->dma.pc_tx_addr, GFP_KERNEL); ++ if (!perif->dma.pc_tx_virt) { ++ dev_err(dev, "cannot allocate posted TX DMA buffer\n"); ++ return -ENOMEM; ++ } ++ ++ perif->dma.pc_rx_virt = dmam_alloc_coherent(dev, PAGE_SIZE, ++ &perif->dma.pc_rx_addr, GFP_KERNEL); ++ if (!perif->dma.pc_rx_virt) { ++ dev_err(dev, "cannot allocate posted RX DMA buffer\n"); ++ return -ENOMEM; ++ } ++ ++ perif->dma.np_tx_virt = dmam_alloc_coherent(dev, PAGE_SIZE, ++ &perif->dma.np_tx_addr, GFP_KERNEL); ++ if (!perif->dma.np_tx_virt) { ++ dev_err(dev, "cannot allocate non-posted TX DMA buffer\n"); ++ return -ENOMEM; ++ } ++ } ++ perif->rtc_enable = of_property_read_bool(dev->of_node, "perif-rtc-enable"); ++ ++ perif->mdev.parent = dev; ++ perif->mdev.minor = MISC_DYNAMIC_MINOR; ++ perif->mdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s-peripheral%d", DEVICE_NAME, espi->dev_id); ++ perif->mdev.fops = &ast2700_espi_perif_fops; ++ rc = misc_register(&perif->mdev); ++ if (rc) { ++ dev_err(dev, "cannot register device %s\n", perif->mdev.name); ++ return rc; ++ } ++ ++ ast2700_espi_perif_reset(espi); ++ ++ if (perif->mmbi.enable) { ++ rc = devm_request_irq(dev, espi->perif.mmbi.irq, ++ ast2700_espi_perif_mmbi_isr, 0, dev_name(dev), espi); ++ if (rc) { ++ dev_err(dev, "cannot request MMBI IRQ\n"); ++ return rc; ++ } ++ } ++ ++ return 0; ++} ++ ++static int ast2700_espi_perif_remove(struct ast2700_espi *espi) ++{ ++ struct ast2700_espi_perif_mmbi *mmbi; ++ struct ast2700_espi_perif *perif; ++ struct device *dev; ++ uint32_t reg; ++ int i; ++ ++ dev = espi->dev; ++ ++ perif = &espi->perif; ++ ++ writel(0x0, espi->regs + ESPI_CH0_INT_EN); ++ writel(0x0, espi->regs + ESPI_MMBI_INT_EN); ++ ++ reg = readl(espi->regs + ESPI_CH0_MCYC0_MASKL); ++ reg &= ~ESPI_CH0_MCYC0_MASKL_EN; ++ writel(reg, espi->regs + ESPI_CH0_MCYC0_MASKL); ++ ++ reg = readl(espi->regs + ESPI_CH0_MCYC1_MASKL); ++ reg &= ~ESPI_CH0_MCYC1_MASKL_EN; ++ writel(reg, espi->regs + ESPI_CH0_MCYC1_MASKL); ++ ++ reg = readl(espi->regs + ESPI_CH0_CTRL); ++ reg |= (ESPI_CH0_CTRL_MCYC_RD_DIS | ESPI_CH0_CTRL_MCYC_WR_DIS); ++ reg &= ~(ESPI_CH0_CTRL_NP_TX_DMA_EN ++ | ESPI_CH0_CTRL_PC_TX_DMA_EN ++ | ESPI_CH0_CTRL_PC_RX_DMA_EN ++ | ESPI_CH0_CTRL_SW_RDY); ++ writel(reg, espi->regs + ESPI_CH0_CTRL); ++ ++ if (perif->mmbi.enable) { ++ reg = readl(espi->regs + ESPI_MMBI_CTRL); ++ reg &= ~ESPI_MMBI_CTRL_EN; ++ writel(reg, espi->regs + ESPI_MMBI_CTRL); ++ ++ for (i = 0; i < perif->mmbi.inst_num; ++i) { ++ mmbi = &perif->mmbi.inst[i]; ++ misc_deregister(&mmbi->b2h_mdev); ++ misc_deregister(&mmbi->h2b_mdev); ++ 
} ++ ++ devm_iounmap(dev, perif->mmbi.virt); ++ } ++ ++ if (perif->mcyc.enable) ++ dmam_free_coherent(dev, perif->mcyc.size, perif->mcyc.virt, ++ perif->mcyc.taddr); ++ ++ if (perif->dma.enable) { ++ dmam_free_coherent(dev, PAGE_SIZE, perif->dma.np_tx_virt, ++ perif->dma.np_tx_addr); ++ dmam_free_coherent(dev, PAGE_SIZE, perif->dma.pc_tx_virt, ++ perif->dma.pc_tx_addr); ++ dmam_free_coherent(dev, PAGE_SIZE, perif->dma.pc_rx_virt, ++ perif->dma.pc_rx_addr); ++ } ++ ++ mutex_destroy(&perif->np_tx_mtx); ++ mutex_destroy(&perif->pc_tx_mtx); ++ mutex_destroy(&perif->pc_rx_mtx); ++ ++ misc_deregister(&perif->mdev); ++ ++ return 0; ++} ++ ++/* virtual wire channel (CH1) */ ++static long ast2700_espi_vw_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) ++{ ++ struct ast2700_espi_vw *vw; ++ struct ast2700_espi *espi; ++ uint32_t gpio0, gpio1; ++ uint32_t hw_mode; ++ ++ vw = container_of(fp->private_data, struct ast2700_espi_vw, mdev); ++ espi = container_of(vw, struct ast2700_espi, vw); ++ gpio0 = vw->gpio.val0; ++ gpio1 = vw->gpio.val1; ++ hw_mode = vw->gpio.hw_mode; ++ ++ if (hw_mode) { ++ dev_err(espi->dev, "HW mode: vGPIO reflect on physical GPIO. Get state from GPIO driver.\n"); ++ return -EFAULT; ++ } ++ ++ switch (cmd) { ++ case ASPEED_ESPI_VW_GET_GPIO_VAL: ++ if (put_user(gpio0, (uint32_t __user *)arg)) { ++ dev_err(espi->dev, "failed to get vGPIO value0\n"); ++ return -EFAULT; ++ } ++ ++ dev_info(espi->dev, "Get vGPIO value0: 0x%x\n", gpio0); ++ break; ++ ++ case ASPEED_ESPI_VW_PUT_GPIO_VAL: ++ if (get_user(gpio0, (uint32_t __user *)arg)) { ++ dev_err(espi->dev, "failed to put vGPIO value0\n"); ++ return -EFAULT; ++ } ++ ++ dev_info(espi->dev, "Put vGPIO value0: 0x%x\n", gpio0); ++ writel(gpio0, espi->regs + ESPI_CH1_GPIO_VAL0); ++ break; ++ ++ case ASPEED_ESPI_VW_GET_GPIO_VAL1: ++ if (put_user(gpio1, (uint32_t __user *)arg)) { ++ dev_err(espi->dev, "failed to get vGPIO value1\n"); ++ return -EFAULT; ++ } ++ ++ dev_info(espi->dev, "Get vGPIO value1: 0x%x\n", gpio1); ++ break; ++ ++ case ASPEED_ESPI_VW_PUT_GPIO_VAL1: ++ if (get_user(gpio1, (uint32_t __user *)arg)) { ++ dev_err(espi->dev, "failed to put vGPIO value1\n"); ++ return -EFAULT; ++ } ++ ++ dev_info(espi->dev, "Put vGPIO value1: 0x%x\n", gpio1); ++ writel(gpio1, espi->regs + ESPI_CH1_GPIO_VAL1); ++ break; ++ ++ default: ++ return -EINVAL; ++ }; ++ ++ return 0; ++} ++ ++static const struct file_operations ast2700_espi_vw_fops = { ++ .owner = THIS_MODULE, ++ .unlocked_ioctl = ast2700_espi_vw_ioctl, ++}; ++ ++static void ast2700_espi_vw_isr(struct ast2700_espi *espi) ++{ ++ struct ast2700_espi_vw *vw; ++ uint32_t sts; ++ ++ vw = &espi->vw; ++ ++ sts = readl(espi->regs + ESPI_CH1_INT_STS); ++ ++ if (sts & ESPI_CH1_INT_STS_GPIO) { ++ vw->gpio.val0 = readl(espi->regs + ESPI_CH1_GPIO_VAL0); ++ vw->gpio.val1 = readl(espi->regs + ESPI_CH1_GPIO_VAL1); ++ writel(ESPI_CH1_INT_STS_GPIO, espi->regs + ESPI_CH1_INT_STS); ++ } ++} ++ ++static void ast2700_espi_vw_reset(struct ast2700_espi *espi) ++{ ++ uint32_t reg; ++ struct ast2700_espi_vw *vw = &espi->vw; ++ ++ writel(0x0, espi->regs + ESPI_CH1_INT_EN); ++ writel(0xffffffff, espi->regs + ESPI_CH1_INT_STS); ++ ++ writel(vw->gpio.grp, espi->regs + ESPI_CH1_GPIO_GRP); ++ writel(vw->gpio.dir0, espi->regs + ESPI_CH1_GPIO_DIR0); ++ writel(vw->gpio.dir1, espi->regs + ESPI_CH1_GPIO_DIR1); ++ ++ vw->gpio.val0 = readl(espi->regs + ESPI_CH1_GPIO_VAL0); ++ vw->gpio.val1 = readl(espi->regs + ESPI_CH1_GPIO_VAL1); ++ ++ writel(ESPI_CH1_INT_EN_GPIO, espi->regs + ESPI_CH1_INT_EN); ++ ++ reg = 
readl(espi->regs + ESPI_CH1_CTRL) ++ | ((vw->gpio.hw_mode) ? ESPI_CH1_CTRL_GPIO_HW : 0) ++ | ESPI_CH1_CTRL_SW_RDY; ++ writel(reg, espi->regs + ESPI_CH1_CTRL); ++} ++ ++static int ast2700_espi_vw_probe(struct ast2700_espi *espi) ++{ ++ int rc; ++ struct device *dev = espi->dev; ++ struct ast2700_espi_vw *vw = &espi->vw; ++ ++ vw->gpio.hw_mode = of_property_read_bool(dev->of_node, "vw-gpio-hw-mode"); ++ of_property_read_u32(dev->of_node, "vw-gpio-group", &vw->gpio.grp); ++ of_property_read_u32_index(dev->of_node, "vw-gpio-direction", 0, &vw->gpio.dir0); ++ of_property_read_u32_index(dev->of_node, "vw-gpio-direction", 1, &vw->gpio.dir1); ++ ++ vw->mdev.parent = dev; ++ vw->mdev.minor = MISC_DYNAMIC_MINOR; ++ vw->mdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s-vw%d", DEVICE_NAME, espi->dev_id); ++ vw->mdev.fops = &ast2700_espi_vw_fops; ++ rc = misc_register(&vw->mdev); ++ if (rc) { ++ dev_err(dev, "cannot register device %s\n", vw->mdev.name); ++ return rc; ++ } ++ ++ ast2700_espi_vw_reset(espi); ++ ++ return 0; ++} ++ ++static int ast2700_espi_vw_remove(struct ast2700_espi *espi) ++{ ++ struct ast2700_espi_vw *vw; ++ ++ vw = &espi->vw; ++ ++ writel(0x0, espi->regs + ESPI_CH1_INT_EN); ++ ++ misc_deregister(&vw->mdev); ++ ++ return 0; ++} ++ ++/* out-of-band channel (CH2) */ ++static long ast2700_espi_oob_dma_get_rx(struct file *fp, ++ struct ast2700_espi_oob *oob, ++ struct aspeed_espi_ioc *ioc) ++{ ++ struct ast2700_espi_oob_dma_rx_desc *d; ++ struct ast2700_espi *espi; ++ struct espi_comm_hdr *hdr; ++ uint32_t wptr, pkt_len; ++ unsigned long flags; ++ uint8_t *pkt; ++ int rc; ++ ++ espi = container_of(oob, struct ast2700_espi, oob); ++ ++ wptr = FIELD_PREP(ESPI_CH2_RX_DESC_WPTR_WP, readl(espi->regs + ESPI_CH2_RX_DESC_WPTR)); ++ ++ d = &oob->dma.rxd_virt[wptr]; ++ ++ if (!d->dirty) ++ return -EFAULT; ++ ++ pkt_len = ((d->len) ? 
d->len : ESPI_MAX_PLD_LEN) + sizeof(struct espi_comm_hdr); ++ ++ if (ioc->pkt_len < pkt_len) ++ return -EINVAL; ++ ++ pkt = vmalloc(pkt_len); ++ if (!pkt) ++ return -ENOMEM; ++ ++ hdr = (struct espi_comm_hdr *)pkt; ++ hdr->cyc = d->cyc; ++ hdr->tag = d->tag; ++ hdr->len_h = d->len >> 8; ++ hdr->len_l = d->len & 0xff; ++ memcpy(hdr + 1, oob->dma.rx_virt + (PAGE_SIZE * wptr), pkt_len - sizeof(*hdr)); ++ ++ if (copy_to_user((void __user *)ioc->pkt, pkt, pkt_len)) { ++ rc = -EFAULT; ++ goto free_n_out; ++ } ++ ++ spin_lock_irqsave(&oob->lock, flags); ++ ++ /* make current descriptor available again */ ++ d->dirty = 0; ++ ++ wptr = ((wptr + 1) % OOB_DMA_DESC_NUM); ++ writel(wptr | ESPI_CH2_RX_DESC_WPTR_VALID, espi->regs + ESPI_CH2_RX_DESC_WPTR); ++ ++ /* set ready flag base on the next RX descriptor */ ++ oob->rx_ready = oob->dma.rxd_virt[wptr].dirty; ++ ++ spin_unlock_irqrestore(&oob->lock, flags); ++ ++ rc = 0; ++ ++free_n_out: ++ vfree(pkt); ++ ++ return rc; ++} ++ ++static long ast2700_espi_oob_get_rx(struct file *fp, ++ struct ast2700_espi_oob *oob, ++ struct aspeed_espi_ioc *ioc) ++{ ++ uint32_t reg, cyc, tag, len; ++ struct ast2700_espi *espi; ++ struct espi_comm_hdr *hdr; ++ unsigned long flags; ++ uint32_t pkt_len; ++ uint8_t *pkt; ++ int i, rc; ++ ++ espi = container_of(oob, struct ast2700_espi, oob); ++ ++ if (fp->f_flags & O_NONBLOCK) { ++ if (!mutex_trylock(&oob->rx_mtx)) ++ return -EAGAIN; ++ ++ if (!oob->rx_ready) { ++ rc = -ENODATA; ++ goto unlock_mtx_n_out; ++ } ++ } else { ++ mutex_lock(&oob->rx_mtx); ++ ++ if (!oob->rx_ready) { ++ rc = wait_event_interruptible(oob->wq, oob->rx_ready); ++ if (rc == -ERESTARTSYS) { ++ rc = -EINTR; ++ goto unlock_mtx_n_out; ++ } ++ } ++ } ++ ++ if (oob->dma.enable) { ++ rc = ast2700_espi_oob_dma_get_rx(fp, oob, ioc); ++ goto unlock_mtx_n_out; ++ } ++ ++ /* ++ * common header (i.e. cycle type, tag, and length) ++ * part is written to HW registers ++ */ ++ reg = readl(espi->regs + ESPI_CH2_RX_CTRL); ++ cyc = FIELD_GET(ESPI_CH2_RX_CTRL_CYC, reg); ++ tag = FIELD_GET(ESPI_CH2_RX_CTRL_TAG, reg); ++ len = FIELD_GET(ESPI_CH2_RX_CTRL_LEN, reg); ++ ++ /* ++ * calculate the length of the rest part of the ++ * eSPI packet to be read from HW and copied to ++ * user space. ++ */ ++ pkt_len = ((len) ? 
len : ESPI_MAX_PLD_LEN) + sizeof(struct espi_comm_hdr); ++ ++ if (ioc->pkt_len < pkt_len) { ++ rc = -EINVAL; ++ goto unlock_mtx_n_out; ++ } ++ ++ pkt = vmalloc(pkt_len); ++ if (!pkt) { ++ rc = -ENOMEM; ++ goto unlock_mtx_n_out; ++ } ++ ++ hdr = (struct espi_comm_hdr *)pkt; ++ hdr->cyc = cyc; ++ hdr->tag = tag; ++ hdr->len_h = len >> 8; ++ hdr->len_l = len & 0xff; ++ ++ for (i = sizeof(*hdr); i < pkt_len; ++i) { ++ reg = readl(espi->regs + ESPI_CH2_RX_DATA); ++ pkt[i] = reg & 0xff; ++ } ++ ++ if (copy_to_user((void __user *)ioc->pkt, pkt, pkt_len)) { ++ rc = -EFAULT; ++ goto free_n_out; ++ } ++ ++ spin_lock_irqsave(&oob->lock, flags); ++ ++ writel(ESPI_CH2_RX_CTRL_SERV_PEND, espi->regs + ESPI_CH2_RX_CTRL); ++ oob->rx_ready = 0; ++ ++ spin_unlock_irqrestore(&oob->lock, flags); ++ ++ rc = 0; ++ ++free_n_out: ++ vfree(pkt); ++ ++unlock_mtx_n_out: ++ mutex_unlock(&oob->rx_mtx); ++ ++ return rc; ++} ++ ++static long ast2700_espi_oob_dma_put_tx(struct file *fp, ++ struct ast2700_espi_oob *oob, ++ struct aspeed_espi_ioc *ioc) ++{ ++ struct ast2700_espi_oob_dma_tx_desc *d; ++ struct ast2700_espi *espi; ++ struct espi_comm_hdr *hdr; ++ uint32_t rptr, wptr; ++ uint8_t *pkt; ++ int rc; ++ ++ espi = container_of(oob, struct ast2700_espi, oob); ++ ++ pkt = vzalloc(ioc->pkt_len); ++ if (!pkt) ++ return -ENOMEM; ++ ++ hdr = (struct espi_comm_hdr *)pkt; ++ ++ if (copy_from_user(pkt, (void __user *)ioc->pkt, ioc->pkt_len)) { ++ rc = -EFAULT; ++ goto free_n_out; ++ } ++ ++ /* kick HW to update descriptor read/write pointer */ ++ writel(ESPI_CH2_TX_DESC_RPTR_UPT, espi->regs + ESPI_CH2_TX_DESC_RPTR); ++ ++ rptr = readl(espi->regs + ESPI_CH2_TX_DESC_RPTR); ++ wptr = readl(espi->regs + ESPI_CH2_TX_DESC_WPTR); ++ ++ if (((wptr + 1) % OOB_DMA_DESC_NUM) == rptr) { ++ rc = -EBUSY; ++ goto free_n_out; ++ } ++ ++ d = &oob->dma.txd_virt[wptr]; ++ d->cyc = hdr->cyc; ++ d->tag = hdr->tag; ++ d->len = (hdr->len_h << 8) | (hdr->len_l & 0xff); ++ d->msg_type = OOB_DMA_DESC_CUSTOM; ++ ++ memcpy(oob->dma.tx_virt + (PAGE_SIZE * wptr), hdr + 1, ioc->pkt_len - sizeof(*hdr)); ++ ++ dma_wmb(); ++ ++ wptr = (wptr + 1) % OOB_DMA_DESC_NUM; ++ writel(wptr | ESPI_CH2_TX_DESC_WPTR_VALID, espi->regs + ESPI_CH2_TX_DESC_WPTR); ++ ++ rc = 0; ++ ++free_n_out: ++ vfree(pkt); ++ ++ return rc; ++} ++ ++static long ast2700_espi_oob_put_tx(struct file *fp, ++ struct ast2700_espi_oob *oob, ++ struct aspeed_espi_ioc *ioc) ++{ ++ uint32_t reg, cyc, tag, len; ++ struct ast2700_espi *espi; ++ struct espi_comm_hdr *hdr; ++ uint8_t *pkt; ++ int i, rc; ++ ++ espi = container_of(oob, struct ast2700_espi, oob); ++ ++ if (!mutex_trylock(&oob->tx_mtx)) ++ return -EAGAIN; ++ ++ if (oob->dma.enable) { ++ rc = ast2700_espi_oob_dma_put_tx(fp, oob, ioc); ++ goto unlock_mtx_n_out; ++ } ++ ++ reg = readl(espi->regs + ESPI_CH2_TX_CTRL); ++ if (reg & ESPI_CH2_TX_CTRL_TRIG_PEND) { ++ rc = -EBUSY; ++ goto unlock_mtx_n_out; ++ } ++ ++ if (ioc->pkt_len > ESPI_MAX_PKT_LEN) { ++ rc = -EINVAL; ++ goto unlock_mtx_n_out; ++ } ++ ++ pkt = vmalloc(ioc->pkt_len); ++ if (!pkt) { ++ rc = -ENOMEM; ++ goto unlock_mtx_n_out; ++ } ++ ++ hdr = (struct espi_comm_hdr *)pkt; ++ ++ if (copy_from_user(pkt, (void __user *)ioc->pkt, ioc->pkt_len)) { ++ rc = -EFAULT; ++ goto free_n_out; ++ } ++ ++ /* ++ * common header (i.e. 
cycle type, tag, and length) ++ * part is written to HW registers ++ */ ++ for (i = sizeof(*hdr); i < ioc->pkt_len; ++i) ++ writel(pkt[i], espi->regs + ESPI_CH2_TX_DATA); ++ ++ cyc = hdr->cyc; ++ tag = hdr->tag; ++ len = (hdr->len_h << 8) | (hdr->len_l & 0xff); ++ ++ reg = FIELD_PREP(ESPI_CH2_TX_CTRL_CYC, cyc) ++ | FIELD_PREP(ESPI_CH2_TX_CTRL_TAG, tag) ++ | FIELD_PREP(ESPI_CH2_TX_CTRL_LEN, len) ++ | ESPI_CH2_TX_CTRL_TRIG_PEND; ++ writel(reg, espi->regs + ESPI_CH2_TX_CTRL); ++ ++ rc = 0; ++ ++free_n_out: ++ vfree(pkt); ++ ++unlock_mtx_n_out: ++ mutex_unlock(&oob->tx_mtx); ++ ++ return rc; ++} ++ ++static long ast2700_espi_oob_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) ++{ ++ struct ast2700_espi_oob *oob; ++ struct aspeed_espi_ioc ioc; ++ ++ oob = container_of(fp->private_data, struct ast2700_espi_oob, mdev); ++ ++ if (copy_from_user(&ioc, (void __user *)arg, sizeof(ioc))) ++ return -EFAULT; ++ ++ if (ioc.pkt_len > ESPI_MAX_PKT_LEN) ++ return -EINVAL; ++ ++ switch (cmd) { ++ case ASPEED_ESPI_OOB_GET_RX: ++ return ast2700_espi_oob_get_rx(fp, oob, &ioc); ++ case ASPEED_ESPI_OOB_PUT_TX: ++ return ast2700_espi_oob_put_tx(fp, oob, &ioc); ++ }; ++ ++ return -EINVAL; ++} ++ ++static const struct file_operations ast2700_espi_oob_fops = { ++ .owner = THIS_MODULE, ++ .unlocked_ioctl = ast2700_espi_oob_ioctl, ++}; ++ ++static void ast2700_espi_oob_isr(struct ast2700_espi *espi) ++{ ++ struct ast2700_espi_oob *oob; ++ unsigned long flags; ++ uint32_t sts; ++ ++ oob = &espi->oob; ++ ++ sts = readl(espi->regs + ESPI_CH2_INT_STS); ++ ++ if (sts & ESPI_CH2_INT_STS_RX_CMPLT) { ++ writel(ESPI_CH2_INT_STS_RX_CMPLT, espi->regs + ESPI_CH2_INT_STS); ++ ++ spin_lock_irqsave(&oob->lock, flags); ++ oob->rx_ready = true; ++ spin_unlock_irqrestore(&oob->lock, flags); ++ ++ wake_up_interruptible(&oob->wq); ++ } ++} ++ ++static void ast2700_espi_oob_reset(struct ast2700_espi *espi) ++{ ++ struct ast2700_espi_oob *oob; ++ dma_addr_t tx_addr, rx_addr; ++ uint32_t reg; ++ int i; ++ ++ oob = &espi->oob; ++ ++ writel(0x0, espi->regs + ESPI_CH2_INT_EN); ++ writel(0xffffffff, espi->regs + ESPI_CH2_INT_STS); ++ ++ reg = readl(espi->regs + ESPI_CH2_CTRL); ++ reg &= ~(ESPI_CH2_CTRL_TX_RST ++ | ESPI_CH2_CTRL_RX_RST ++ | ESPI_CH2_CTRL_TX_DMA_EN ++ | ESPI_CH2_CTRL_RX_DMA_EN ++ | ESPI_CH2_CTRL_SW_RDY); ++ writel(reg, espi->regs + ESPI_CH2_CTRL); ++ ++ udelay(1); ++ ++ reg |= (ESPI_CH2_CTRL_TX_RST | ESPI_CH2_CTRL_RX_RST); ++ writel(reg, espi->regs + ESPI_CH2_CTRL); ++ ++ if (oob->dma.enable) { ++ tx_addr = oob->dma.tx_addr; ++ rx_addr = oob->dma.rx_addr; ++ ++ for (i = 0; i < OOB_DMA_DESC_NUM; ++i) { ++ oob->dma.txd_virt[i].data_addrh = tx_addr >> 32; ++ oob->dma.txd_virt[i].data_addrl = tx_addr & 0xffffffff; ++ tx_addr += PAGE_SIZE; ++ ++ oob->dma.rxd_virt[i].data_addrh = rx_addr >> 32; ++ oob->dma.rxd_virt[i].data_addrl = rx_addr & 0xffffffff; ++ oob->dma.rxd_virt[i].dirty = 0; ++ rx_addr += PAGE_SIZE; ++ } ++ ++ writel(oob->dma.txd_addr >> 32, espi->regs + ESPI_CH2_TX_DMAH); ++ writel(oob->dma.txd_addr & 0xffffffff, espi->regs + ESPI_CH2_TX_DMAL); ++ writel(OOB_DMA_RPTR_KEY, espi->regs + ESPI_CH2_TX_DESC_RPTR); ++ writel(0x0, espi->regs + ESPI_CH2_TX_DESC_WPTR); ++ writel(OOB_DMA_DESC_NUM, espi->regs + ESPI_CH2_TX_DESC_EPTR); ++ ++ writel(oob->dma.rxd_addr >> 32, espi->regs + ESPI_CH2_RX_DMAH); ++ writel(oob->dma.rxd_addr & 0xffffffff, espi->regs + ESPI_CH2_RX_DMAL); ++ writel(OOB_DMA_RPTR_KEY, espi->regs + ESPI_CH2_RX_DESC_RPTR); ++ writel(0x0, espi->regs + ESPI_CH2_RX_DESC_WPTR); ++ writel(OOB_DMA_DESC_NUM, 
espi->regs + ESPI_CH2_RX_DESC_EPTR); ++ ++ reg = readl(espi->regs + ESPI_CH2_CTRL) ++ | ESPI_CH2_CTRL_TX_DMA_EN ++ | ESPI_CH2_CTRL_RX_DMA_EN; ++ writel(reg, espi->regs + ESPI_CH2_CTRL); ++ ++ /* activate RX DMA to make OOB_FREE */ ++ reg = readl(espi->regs + ESPI_CH2_RX_DESC_WPTR) | ESPI_CH2_RX_DESC_WPTR_VALID; ++ writel(reg, espi->regs + ESPI_CH2_RX_DESC_WPTR); ++ } ++ ++ writel(ESPI_CH2_INT_EN_RX_CMPLT, espi->regs + ESPI_CH2_INT_EN); ++ ++ reg = readl(espi->regs + ESPI_CH2_CTRL) | ESPI_CH2_CTRL_SW_RDY; ++ writel(reg, espi->regs + ESPI_CH2_CTRL); ++} ++ ++static int ast2700_espi_oob_probe(struct ast2700_espi *espi) ++{ ++ struct ast2700_espi_oob *oob; ++ struct device *dev; ++ int rc; ++ ++ dev = espi->dev; ++ ++ oob = &espi->oob; ++ ++ init_waitqueue_head(&oob->wq); ++ ++ spin_lock_init(&oob->lock); ++ ++ mutex_init(&oob->tx_mtx); ++ mutex_init(&oob->rx_mtx); ++ ++ oob->dma.enable = of_property_read_bool(dev->of_node, "oob-dma-mode"); ++ if (oob->dma.enable) { ++ oob->dma.txd_virt = dmam_alloc_coherent(dev, sizeof(*oob->dma.txd_virt) * OOB_DMA_DESC_NUM, &oob->dma.txd_addr, GFP_KERNEL); ++ if (!oob->dma.txd_virt) { ++ dev_err(dev, "cannot allocate DMA TX descriptor\n"); ++ return -ENOMEM; ++ } ++ oob->dma.tx_virt = dmam_alloc_coherent(dev, PAGE_SIZE * OOB_DMA_DESC_NUM, &oob->dma.tx_addr, GFP_KERNEL); ++ if (!oob->dma.tx_virt) { ++ dev_err(dev, "cannot allocate DMA TX buffer\n"); ++ return -ENOMEM; ++ } ++ ++ oob->dma.rxd_virt = dmam_alloc_coherent(dev, sizeof(*oob->dma.rxd_virt) * OOB_DMA_DESC_NUM, &oob->dma.rxd_addr, GFP_KERNEL); ++ if (!oob->dma.rxd_virt) { ++ dev_err(dev, "cannot allocate DMA RX descriptor\n"); ++ return -ENOMEM; ++ } ++ ++ oob->dma.rx_virt = dmam_alloc_coherent(dev, PAGE_SIZE * OOB_DMA_DESC_NUM, &oob->dma.rx_addr, GFP_KERNEL); ++ if (!oob->dma.rx_virt) { ++ dev_err(dev, "cannot allocate DMA TX buffer\n"); ++ return -ENOMEM; ++ } ++ } ++ ++ oob->mdev.parent = dev; ++ oob->mdev.minor = MISC_DYNAMIC_MINOR; ++ oob->mdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s-oob%d", DEVICE_NAME, espi->dev_id); ++ oob->mdev.fops = &ast2700_espi_oob_fops; ++ rc = misc_register(&oob->mdev); ++ if (rc) { ++ dev_err(dev, "cannot register device %s\n", oob->mdev.name); ++ return rc; ++ } ++ ++ ast2700_espi_oob_reset(espi); ++ ++ return 0; ++} ++ ++static int ast2700_espi_oob_remove(struct ast2700_espi *espi) ++{ ++ struct ast2700_espi_oob *oob; ++ struct device *dev; ++ uint32_t reg; ++ ++ dev = espi->dev; ++ ++ oob = &espi->oob; ++ ++ writel(0x0, espi->regs + ESPI_CH2_INT_EN); ++ ++ reg = readl(espi->regs + ESPI_CH2_CTRL); ++ reg &= ~(ESPI_CH2_CTRL_TX_DMA_EN ++ | ESPI_CH2_CTRL_RX_DMA_EN ++ | ESPI_CH2_CTRL_SW_RDY); ++ writel(reg, espi->regs + ESPI_CH2_CTRL); ++ ++ if (oob->dma.enable) { ++ dmam_free_coherent(dev, sizeof(*oob->dma.txd_virt) * OOB_DMA_DESC_NUM, ++ oob->dma.txd_virt, oob->dma.txd_addr); ++ dmam_free_coherent(dev, PAGE_SIZE * OOB_DMA_DESC_NUM, ++ oob->dma.tx_virt, oob->dma.tx_addr); ++ dmam_free_coherent(dev, sizeof(*oob->dma.rxd_virt) * OOB_DMA_DESC_NUM, ++ oob->dma.rxd_virt, oob->dma.rxd_addr); ++ dmam_free_coherent(dev, PAGE_SIZE * OOB_DMA_DESC_NUM, ++ oob->dma.rx_virt, oob->dma.rx_addr); ++ } ++ ++ mutex_destroy(&oob->tx_mtx); ++ mutex_destroy(&oob->rx_mtx); ++ ++ misc_deregister(&oob->mdev); ++ ++ return 0; ++} ++ ++/* flash channel (CH3) */ ++static long ast2700_espi_flash_get_rx(struct file *fp, ++ struct ast2700_espi_flash *flash, ++ struct aspeed_espi_ioc *ioc) ++{ ++ uint32_t reg, cyc, tag, len; ++ struct ast2700_espi *espi; ++ struct espi_comm_hdr *hdr; ++ unsigned 
long flags; ++ uint32_t pkt_len; ++ uint8_t *pkt; ++ int i, rc; ++ ++ rc = 0; ++ ++ espi = container_of(flash, struct ast2700_espi, flash); ++ ++ if (fp->f_flags & O_NONBLOCK) { ++ if (!mutex_trylock(&flash->rx_mtx)) ++ return -EAGAIN; ++ ++ if (!flash->rx_ready) { ++ rc = -ENODATA; ++ goto unlock_mtx_n_out; ++ } ++ } else { ++ mutex_lock(&flash->rx_mtx); ++ ++ if (!flash->rx_ready) { ++ rc = wait_event_interruptible(flash->wq, flash->rx_ready); ++ if (rc == -ERESTARTSYS) { ++ rc = -EINTR; ++ goto unlock_mtx_n_out; ++ } ++ } ++ } ++ ++ /* ++ * common header (i.e. cycle type, tag, and length) ++ * part is written to HW registers ++ */ ++ reg = readl(espi->regs + ESPI_CH3_RX_CTRL); ++ cyc = FIELD_GET(ESPI_CH3_RX_CTRL_CYC, reg); ++ tag = FIELD_GET(ESPI_CH3_RX_CTRL_TAG, reg); ++ len = FIELD_GET(ESPI_CH3_RX_CTRL_LEN, reg); ++ ++ /* ++ * calculate the length of the rest part of the ++ * eSPI packet to be read from HW and copied to ++ * user space. ++ */ ++ switch (cyc) { ++ case ESPI_FLASH_WRITE: ++ pkt_len = ((len) ? len : ESPI_MAX_PLD_LEN) + ++ sizeof(struct espi_flash_rwe); ++ break; ++ case ESPI_FLASH_READ: ++ case ESPI_FLASH_ERASE: ++ pkt_len = sizeof(struct espi_flash_rwe); ++ break; ++ case ESPI_FLASH_SUC_CMPLT_D_MIDDLE: ++ case ESPI_FLASH_SUC_CMPLT_D_FIRST: ++ case ESPI_FLASH_SUC_CMPLT_D_LAST: ++ case ESPI_FLASH_SUC_CMPLT_D_ONLY: ++ pkt_len = ((len) ? len : ESPI_MAX_PLD_LEN) + ++ sizeof(struct espi_flash_cmplt); ++ break; ++ case ESPI_FLASH_SUC_CMPLT: ++ case ESPI_FLASH_UNSUC_CMPLT: ++ pkt_len = sizeof(struct espi_flash_cmplt); ++ break; ++ default: ++ rc = -EFAULT; ++ goto unlock_mtx_n_out; ++ } ++ ++ if (ioc->pkt_len < pkt_len) { ++ rc = -EINVAL; ++ goto unlock_mtx_n_out; ++ } ++ ++ pkt = vmalloc(pkt_len); ++ if (!pkt) { ++ rc = -ENOMEM; ++ goto unlock_mtx_n_out; ++ } ++ ++ hdr = (struct espi_comm_hdr *)pkt; ++ hdr->cyc = cyc; ++ hdr->tag = tag; ++ hdr->len_h = len >> 8; ++ hdr->len_l = len & 0xff; ++ ++ if (flash->dma.enable) { ++ memcpy(hdr + 1, flash->dma.rx_virt, pkt_len - sizeof(*hdr)); ++ } else { ++ for (i = sizeof(*hdr); i < pkt_len; ++i) ++ pkt[i] = readl(espi->regs + ESPI_CH3_RX_DATA) & 0xff; ++ } ++ ++ if (copy_to_user((void __user *)ioc->pkt, pkt, pkt_len)) { ++ rc = -EFAULT; ++ goto free_n_out; ++ } ++ ++ spin_lock_irqsave(&flash->lock, flags); ++ ++ writel(ESPI_CH3_RX_CTRL_SERV_PEND, espi->regs + ESPI_CH3_RX_CTRL); ++ flash->rx_ready = 0; ++ ++ spin_unlock_irqrestore(&flash->lock, flags); ++ ++ rc = 0; ++ ++free_n_out: ++ vfree(pkt); ++ ++unlock_mtx_n_out: ++ mutex_unlock(&flash->rx_mtx); ++ ++ return rc; ++} ++ ++static long ast2700_espi_flash_put_tx(struct file *fp, ++ struct ast2700_espi_flash *flash, ++ struct aspeed_espi_ioc *ioc) ++{ ++ uint32_t reg, cyc, tag, len; ++ struct ast2700_espi *espi; ++ struct espi_comm_hdr *hdr; ++ uint8_t *pkt; ++ int i, rc; ++ ++ espi = container_of(flash, struct ast2700_espi, flash); ++ ++ if (!mutex_trylock(&flash->tx_mtx)) ++ return -EAGAIN; ++ ++ reg = readl(espi->regs + ESPI_CH3_TX_CTRL); ++ if (reg & ESPI_CH3_TX_CTRL_TRIG_PEND) { ++ rc = -EBUSY; ++ goto unlock_mtx_n_out; ++ } ++ ++ pkt = vmalloc(ioc->pkt_len); ++ if (!pkt) { ++ rc = -ENOMEM; ++ goto unlock_mtx_n_out; ++ } ++ ++ hdr = (struct espi_comm_hdr *)pkt; ++ ++ if (copy_from_user(pkt, (void __user *)ioc->pkt, ioc->pkt_len)) { ++ rc = -EFAULT; ++ goto free_n_out; ++ } ++ ++ /* ++ * common header (i.e. 
cycle type, tag, and length) ++ * part is written to HW registers ++ */ ++ if (flash->dma.enable) { ++ memcpy(flash->dma.tx_virt, hdr + 1, ioc->pkt_len - sizeof(*hdr)); ++ dma_wmb(); ++ } else { ++ for (i = sizeof(*hdr); i < ioc->pkt_len; ++i) ++ writel(pkt[i], espi->regs + ESPI_CH3_TX_DATA); ++ } ++ ++ cyc = hdr->cyc; ++ tag = hdr->tag; ++ len = (hdr->len_h << 8) | (hdr->len_l & 0xff); ++ ++ reg = FIELD_PREP(ESPI_CH3_TX_CTRL_CYC, cyc) ++ | FIELD_PREP(ESPI_CH3_TX_CTRL_TAG, tag) ++ | FIELD_PREP(ESPI_CH3_TX_CTRL_LEN, len) ++ | ESPI_CH3_TX_CTRL_TRIG_PEND; ++ writel(reg, espi->regs + ESPI_CH3_TX_CTRL); ++ ++ rc = 0; ++ ++free_n_out: ++ vfree(pkt); ++ ++unlock_mtx_n_out: ++ mutex_unlock(&flash->tx_mtx); ++ ++ return rc; ++} ++ ++static long ast2700_espi_flash_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) ++{ ++ struct ast2700_espi_flash *flash; ++ struct aspeed_espi_ioc ioc; ++ ++ flash = container_of(fp->private_data, struct ast2700_espi_flash, mdev); ++ ++ if (copy_from_user(&ioc, (void __user *)arg, sizeof(ioc))) ++ return -EFAULT; ++ ++ if (ioc.pkt_len > ESPI_MAX_PKT_LEN) ++ return -EINVAL; ++ ++ switch (cmd) { ++ case ASPEED_ESPI_FLASH_GET_RX: ++ return ast2700_espi_flash_get_rx(fp, flash, &ioc); ++ case ASPEED_ESPI_FLASH_PUT_TX: ++ return ast2700_espi_flash_put_tx(fp, flash, &ioc); ++ }; ++ ++ return -EINVAL; ++} ++ ++static const struct file_operations ast2700_espi_flash_fops = { ++ .owner = THIS_MODULE, ++ .unlocked_ioctl = ast2700_espi_flash_ioctl, ++}; ++ ++static void ast2700_espi_flash_isr(struct ast2700_espi *espi) ++{ ++ struct ast2700_espi_flash *flash; ++ unsigned long flags; ++ uint32_t sts; ++ ++ flash = &espi->flash; ++ ++ sts = readl(espi->regs + ESPI_CH3_INT_STS); ++ ++ if (sts & ESPI_CH3_INT_STS_RX_CMPLT) { ++ writel(ESPI_CH3_INT_STS_RX_CMPLT, espi->regs + ESPI_CH3_INT_STS); ++ ++ spin_lock_irqsave(&flash->lock, flags); ++ flash->rx_ready = true; ++ spin_unlock_irqrestore(&flash->lock, flags); ++ ++ wake_up_interruptible(&flash->wq); ++ } ++} ++ ++static void ast2700_espi_flash_reset(struct ast2700_espi *espi) ++{ ++ uint32_t reg; ++ uint64_t mask; ++ struct ast2700_espi_flash *flash = &espi->flash; ++ ++ writel(0x0, espi->regs + ESPI_CH3_INT_EN); ++ writel(0xffffffff, espi->regs + ESPI_CH3_INT_STS); ++ ++ reg = readl(espi->regs + ESPI_CH3_CTRL); ++ reg &= ~(ESPI_CH3_CTRL_TX_RST ++ | ESPI_CH3_CTRL_RX_RST ++ | ESPI_CH3_CTRL_TX_DMA_EN ++ | ESPI_CH3_CTRL_RX_DMA_EN ++ | ESPI_CH3_CTRL_SW_RDY); ++ writel(reg, espi->regs + ESPI_CH3_CTRL); ++ ++ udelay(1); ++ ++ reg |= (ESPI_CH3_CTRL_TX_RST | ESPI_CH3_CTRL_RX_RST); ++ writel(reg, espi->regs + ESPI_CH3_CTRL); ++ ++ if (flash->edaf.mode == EDAF_MODE_MIX) { ++ mask = ~(flash->edaf.size - 1); ++ writel(mask >> 32, espi->regs + ESPI_CH3_EDAF_MASKH); ++ writel(mask & 0xffffffff, espi->regs + ESPI_CH3_EDAF_MASKL); ++ writel(flash->edaf.taddr >> 32, espi->regs + ESPI_CH3_EDAF_TADDRH); ++ writel(flash->edaf.taddr & 0xffffffff, espi->regs + ESPI_CH3_EDAF_TADDRL); ++ } ++ ++ reg = readl(espi->regs + ESPI_CH3_CTRL) & ~ESPI_CH3_CTRL_EDAF_MODE; ++ reg |= FIELD_PREP(ESPI_CH3_CTRL_EDAF_MODE, flash->edaf.mode); ++ writel(reg, espi->regs + ESPI_CH3_CTRL); ++ ++ if (flash->dma.enable) { ++ writel(flash->dma.tx_addr >> 32, espi->regs + ESPI_CH3_TX_DMAH); ++ writel(flash->dma.tx_addr & 0xffffffff, espi->regs + ESPI_CH3_TX_DMAL); ++ writel(flash->dma.rx_addr >> 32, espi->regs + ESPI_CH3_RX_DMAH); ++ writel(flash->dma.rx_addr & 0xffffffff, espi->regs + ESPI_CH3_RX_DMAL); ++ ++ reg = readl(espi->regs + ESPI_CH3_CTRL) ++ | 
ESPI_CH3_CTRL_TX_DMA_EN ++ | ESPI_CH3_CTRL_RX_DMA_EN; ++ writel(reg, espi->regs + ESPI_CH3_CTRL); ++ } ++ ++ writel(ESPI_CH3_INT_EN_RX_CMPLT, espi->regs + ESPI_CH3_INT_EN); ++ ++ reg = readl(espi->regs + ESPI_CH3_CTRL) | ESPI_CH3_CTRL_SW_RDY; ++ writel(reg, espi->regs + ESPI_CH3_CTRL); ++} ++ ++static int ast2700_espi_flash_probe(struct ast2700_espi *espi) ++{ ++ struct ast2700_espi_flash *flash; ++ struct device_node *np; ++ struct resource res; ++ struct device *dev; ++ void *virt; ++ int rc; ++ ++ dev = espi->dev; ++ ++ flash = &espi->flash; ++ ++ init_waitqueue_head(&flash->wq); ++ ++ spin_lock_init(&flash->lock); ++ ++ mutex_init(&flash->tx_mtx); ++ mutex_init(&flash->rx_mtx); ++ ++ flash->edaf.mode = EDAF_MODE_HW; ++ ++ of_property_read_u32(dev->of_node, "flash-edaf-mode", &flash->edaf.mode); ++ dev_info(dev, "eDAF mode: 0x%x\n", flash->edaf.mode); ++ if (flash->edaf.mode == EDAF_MODE_MIX) { ++ np = of_parse_phandle(dev->of_node, "flash-edaf-tgt-addr", 0); ++ if (!np || of_address_to_resource(np, 0, &res)) { ++ dev_err(dev, "cannot get eDAF memory region\n"); ++ return -ENODEV; ++ } ++ ++ of_node_put(np); ++ ++ flash->edaf.taddr = res.start; ++ flash->edaf.size = resource_size(&res); ++ dev_info(dev, "eDAF address: 0x%llx\n", flash->edaf.taddr); ++ dev_info(dev, "eDAF size: 0x%llx\n", flash->edaf.size); ++ ++ virt = devm_ioremap_resource(dev, &res); ++ if (!virt) { ++ dev_err(dev, "cannot map eDAF memory region\n"); ++ return -ENOMEM; ++ } ++ } ++ ++ flash->dma.enable = of_property_read_bool(dev->of_node, "flash-dma-mode"); ++ if (flash->dma.enable) { ++ flash->dma.tx_virt = dmam_alloc_coherent(dev, PAGE_SIZE, &flash->dma.tx_addr, GFP_KERNEL); ++ if (!flash->dma.tx_virt) { ++ dev_err(dev, "cannot allocate DMA TX buffer\n"); ++ return -ENOMEM; ++ } ++ ++ flash->dma.rx_virt = dmam_alloc_coherent(dev, PAGE_SIZE, &flash->dma.rx_addr, GFP_KERNEL); ++ if (!flash->dma.rx_virt) { ++ dev_err(dev, "cannot allocate DMA RX buffer\n"); ++ return -ENOMEM; ++ } ++ } ++ ++ flash->mdev.parent = dev; ++ flash->mdev.minor = MISC_DYNAMIC_MINOR; ++ flash->mdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s-flash%d", DEVICE_NAME, espi->dev_id); ++ flash->mdev.fops = &ast2700_espi_flash_fops; ++ rc = misc_register(&flash->mdev); ++ if (rc) { ++ dev_err(dev, "cannot register device %s\n", flash->mdev.name); ++ return rc; ++ } ++ ++ ast2700_espi_flash_reset(espi); ++ ++ return 0; ++} ++ ++static int ast2700_espi_flash_remove(struct ast2700_espi *espi) ++{ ++ struct ast2700_espi_flash *flash; ++ struct device *dev; ++ uint32_t reg; ++ ++ dev = espi->dev; ++ ++ flash = &espi->flash; ++ ++ writel(0x0, espi->regs + ESPI_CH3_INT_EN); ++ ++ reg = readl(espi->regs + ESPI_CH3_CTRL); ++ reg &= ~(ESPI_CH3_CTRL_TX_DMA_EN ++ | ESPI_CH3_CTRL_RX_DMA_EN ++ | ESPI_CH3_CTRL_SW_RDY); ++ writel(reg, espi->regs + ESPI_CH3_CTRL); ++ ++ if (flash->dma.enable) { ++ dmam_free_coherent(dev, PAGE_SIZE, flash->dma.tx_virt, flash->dma.tx_addr); ++ dmam_free_coherent(dev, PAGE_SIZE, flash->dma.rx_virt, flash->dma.rx_addr); ++ } ++ ++ mutex_destroy(&flash->tx_mtx); ++ mutex_destroy(&flash->rx_mtx); ++ ++ misc_deregister(&flash->mdev); ++ ++ return 0; ++} ++ ++/* global control */ ++static irqreturn_t ast2700_espi_isr(int irq, void *arg) ++{ ++ uint32_t sts; ++ struct ast2700_espi *espi = (struct ast2700_espi *)arg; ++ ++ sts = readl(espi->regs + ESPI_INT_STS); ++ if (!sts) ++ return IRQ_NONE; ++ ++ if (sts & ESPI_INT_STS_CH0) ++ ast2700_espi_perif_isr(espi); ++ ++ if (sts & ESPI_INT_STS_CH1) ++ ast2700_espi_vw_isr(espi); ++ ++ if (sts & 
ESPI_INT_STS_CH2) ++ ast2700_espi_oob_isr(espi); ++ ++ if (sts & ESPI_INT_STS_CH3) ++ ast2700_espi_flash_isr(espi); ++ ++ if (sts & ESPI_INT_STS_RST_DEASSERT) { ++ ast2700_espi_perif_sw_reset(espi); ++ ast2700_espi_perif_reset(espi); ++ ast2700_espi_vw_reset(espi); ++ ast2700_espi_oob_reset(espi); ++ ast2700_espi_flash_reset(espi); ++ writel(ESPI_INT_STS_RST_DEASSERT, espi->regs + ESPI_INT_STS); ++ } ++ ++ return IRQ_HANDLED; ++} ++ ++static int ast2700_espi_probe(struct platform_device *pdev) ++{ ++ struct ast2700_espi *espi; ++ struct resource *res; ++ struct device *dev; ++ struct regmap *scu1; ++ uint32_t reg; ++ int rc; ++ ++ dev = &pdev->dev; ++ ++ espi = devm_kzalloc(dev, sizeof(*espi), GFP_KERNEL); ++ if (!espi) ++ return -ENOMEM; ++ ++ espi->dev = dev; ++ ++ rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); ++ if (rc) { ++ dev_err(dev, "cannot set 64-bit DMA mask\n"); ++ return rc; ++ } ++ ++ scu1 = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon"); ++ if (IS_ERR(scu1)) { ++ dev_err(dev, "failed to find SCU1 regmap\n"); ++ return PTR_ERR(scu1); ++ } ++ rc = regmap_update_bits(scu1, SCU1_DDR, ++ SCU1_DDR_DIS_ESPI0_AHB | SCU1_DDR_DIS_ESPI1_AHB, ++ 0); ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (!res) { ++ dev_err(dev, "cannot get resource\n"); ++ return -ENODEV; ++ } ++ ++ espi->regs = devm_ioremap_resource(dev, res); ++ if (IS_ERR(espi->regs)) { ++ dev_err(dev, "cannot map registers\n"); ++ return PTR_ERR(espi->regs); ++ } ++ ++ espi->irq = platform_get_irq(pdev, 0); ++ if (espi->irq < 0) { ++ dev_err(dev, "cannot get IRQ number\n"); ++ return -ENODEV; ++ } ++ ++ espi->clk = devm_clk_get(dev, NULL); ++ if (IS_ERR(espi->clk)) { ++ dev_err(dev, "cannot get clock control\n"); ++ return PTR_ERR(espi->clk); ++ } ++ ++ rc = clk_prepare_enable(espi->clk); ++ if (rc) { ++ dev_err(dev, "cannot enable clocks\n"); ++ return rc; ++ } ++ ++ espi->dev_id = ida_alloc(&ast2700_espi_ida, GFP_KERNEL); ++ if (espi->dev_id < 0) { ++ dev_err(dev, "cannot allocate device ID\n"); ++ return espi->dev_id; ++ } ++ ++ reg = readl(espi->regs + ESPI_INT_EN); ++ reg &= ~ESPI_INT_EN_RST_DEASSERT; ++ writel(reg, espi->regs + ESPI_INT_EN); ++ ++ rc = ast2700_espi_perif_probe(espi); ++ if (rc) { ++ dev_err(dev, "cannot init CH0, rc=%d\n", rc); ++ return rc; ++ } ++ ++ rc = ast2700_espi_vw_probe(espi); ++ if (rc) { ++ dev_err(dev, "cannot init CH1, rc=%d\n", rc); ++ goto err_remove_perif; ++ } ++ ++ rc = ast2700_espi_oob_probe(espi); ++ if (rc) { ++ dev_err(dev, "cannot init CH2, rc=%d\n", rc); ++ goto err_remove_vw; ++ } ++ ++ rc = ast2700_espi_flash_probe(espi); ++ if (rc) { ++ dev_err(dev, "cannot init CH3, rc=%d\n", rc); ++ goto err_remove_oob; ++ } ++ ++ rc = devm_request_irq(dev, espi->irq, ast2700_espi_isr, 0, dev_name(dev), espi); ++ if (rc) { ++ dev_err(dev, "cannot request IRQ\n"); ++ goto err_remove_flash; ++ } ++ ++ reg = readl(espi->regs + ESPI_INT_EN); ++ reg |= ESPI_INT_EN_RST_DEASSERT; ++ writel(reg, espi->regs + ESPI_INT_EN); ++ ++ platform_set_drvdata(pdev, espi); ++ ++ dev_info(dev, "module loaded\n"); ++ ++ return 0; ++ ++err_remove_flash: ++ ast2700_espi_flash_remove(espi); ++err_remove_oob: ++ ast2700_espi_oob_remove(espi); ++err_remove_vw: ++ ast2700_espi_vw_remove(espi); ++err_remove_perif: ++ ast2700_espi_perif_remove(espi); ++ ++ return rc; ++} ++ ++static void ast2700_espi_remove(struct platform_device *pdev) ++{ ++ struct ast2700_espi *espi; ++ struct device *dev; ++ uint32_t reg; ++ int rc; ++ ++ dev = &pdev->dev; ++ ++ espi = 
platform_get_drvdata(pdev); ++ ++ reg = readl(espi->regs + ESPI_INT_EN); ++ reg &= ~ESPI_INT_EN_RST_DEASSERT; ++ writel(reg, espi->regs + ESPI_INT_EN); ++ ++ rc = ast2700_espi_perif_remove(espi); ++ if (rc) ++ dev_warn(dev, "cannot remove peripheral channel, rc=%d\n", rc); ++ ++ rc = ast2700_espi_vw_remove(espi); ++ if (rc) ++ dev_warn(dev, "cannot remove virtual wire channel, rc=%d\n", rc); ++ ++ rc = ast2700_espi_oob_remove(espi); ++ if (rc) ++ dev_warn(dev, "cannot remove out-of-band channel, rc=%d\n", rc); ++ ++ rc = ast2700_espi_flash_remove(espi); ++ if (rc) ++ dev_warn(dev, "cannot remove flash channel, rc=%d\n", rc); ++} ++ ++static const struct of_device_id ast2700_espi_of_matches[] = { ++ { .compatible = "aspeed,ast2700-espi" }, ++ { }, ++}; ++ ++static struct platform_driver ast2700_espi_driver = { ++ .driver = { ++ .name = "ast2700-espi", ++ .of_match_table = ast2700_espi_of_matches, ++ }, ++ .probe = ast2700_espi_probe, ++ .remove = ast2700_espi_remove, ++}; ++ ++module_platform_driver(ast2700_espi_driver); ++ ++MODULE_AUTHOR("Chia-Wei Wang "); ++MODULE_DESCRIPTION("Control of AST2700 eSPI Device"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/soc/aspeed/ast2700-espi.h b/drivers/soc/aspeed/ast2700-espi.h +--- a/drivers/soc/aspeed/ast2700-espi.h 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/soc/aspeed/ast2700-espi.h 2025-12-23 10:16:21.126032636 +0000 +@@ -0,0 +1,281 @@ ++/* SPDX-License-Identifier: GPL-2.0+ */ ++/* ++ * Copyright 2023 Aspeed Technology Inc. ++ */ ++#ifndef _AST2700_ESPI_H_ ++#define _AST2700_ESPI_H_ ++ ++#include ++#include "aspeed-espi-comm.h" ++ ++/* SCU registers */ ++#define SCU1_DDR 0x0c8 ++#define SCU1_DDR_DIS_ESPI0_AHB BIT(0) ++#define SCU1_DDR_DIS_ESPI1_AHB BIT(1) ++ ++/* global registers */ ++#define ESPI_CTRL 0x000 ++#define ESPI_STS 0x004 ++#define ESPI_INT_STS 0x008 ++#define ESPI_INT_STS_RST_DEASSERT BIT(31) ++#define ESPI_INT_STS_RST_ASSERT BIT(30) ++#define ESPI_INT_STS_CH3 BIT(3) ++#define ESPI_INT_STS_CH2 BIT(2) ++#define ESPI_INT_STS_CH1 BIT(1) ++#define ESPI_INT_STS_CH0 BIT(0) ++#define ESPI_INT_EN 0x00c ++#define ESPI_INT_EN_RST_DEASSERT BIT(31) ++#define ESPI_INT_EN_RST_ASSERT BIT(30) ++#define ESPI_DEV_ID 0x010 ++#define ESPI_CAP_GEN 0x014 ++#define ESPI_CAP_GEN_RTC_SUP BIT(29) ++#define ESPI_CAP_CH0 0x018 ++#define ESPI_CAP_CH1 0x01c ++#define ESPI_CAP_CH2 0x020 ++#define ESPI_CAP_CH3_0 0x024 ++#define ESPI_CAP_CH3_1 0x028 ++#define ESPI_DEV_STS 0x030 ++#define ESPI_DBG_CTRL 0x034 ++#define ESPI_DBG_ADDRL 0x038 ++#define ESPI_DBG_ADDRH 0x03c ++#define ESPI_DBG_CMD 0x040 ++#define ESPI_DBG_RES 0x044 ++#define ESPI_CH_ACC_CTRL 0x04c ++#define ESPI_CH_ACC_OFST1 0x050 ++#define ESPI_CH_ACC_OFST2 0x054 ++#define ESPI_WPROT0 0x0f8 ++#define ESPI_WPROT1 0x0fc ++ ++/* peripheral channel (ch0) registers */ ++#define ESPI_CH0_CTRL 0x100 ++#define ESPI_CH0_CTRL_NP_TX_RST BIT(31) ++#define ESPI_CH0_CTRL_NP_RX_RST BIT(30) ++#define ESPI_CH0_CTRL_PC_TX_RST BIT(29) ++#define ESPI_CH0_CTRL_PC_RX_RST BIT(28) ++#define ESPI_CH0_CTRL_NP_TX_DMA_EN BIT(19) ++#define ESPI_CH0_CTRL_PC_TX_DMA_EN BIT(17) ++#define ESPI_CH0_CTRL_PC_RX_DMA_EN BIT(16) ++#define ESPI_CH0_CTRL_MCYC_RD_DIS_WDT BIT(9) ++#define ESPI_CH0_CTRL_MCYC_WR_DIS_WDT BIT(8) ++#define ESPI_CH0_CTRL_MCYC_RD_DIS BIT(6) ++#define ESPI_CH0_CTRL_MCYC_WR_DIS BIT(4) ++#define ESPI_CH0_CTRL_SW_RDY BIT(1) ++#define ESPI_CH0_STS 0x104 ++#define ESPI_CH0_INT_STS 0x108 ++#define ESPI_CH0_INT_STS_PC_RX_CMPLT BIT(0) ++#define ESPI_CH0_INT_EN 0x10c ++#define ESPI_CH0_INT_EN_PC_RX_CMPLT BIT(0)
++#define ESPI_CH0_PC_RX_DMAL 0x110 ++#define ESPI_CH0_PC_RX_DMAH 0x114 ++#define ESPI_CH0_PC_RX_CTRL 0x118 ++#define ESPI_CH0_PC_RX_CTRL_SERV_PEND BIT(31) ++#define ESPI_CH0_PC_RX_CTRL_LEN GENMASK(23, 12) ++#define ESPI_CH0_PC_RX_CTRL_TAG GENMASK(11, 8) ++#define ESPI_CH0_PC_RX_CTRL_CYC GENMASK(7, 0) ++#define ESPI_CH0_PC_RX_DATA 0x11c ++#define ESPI_CH0_PC_TX_DMAL 0x120 ++#define ESPI_CH0_PC_TX_DMAH 0x124 ++#define ESPI_CH0_PC_TX_CTRL 0x128 ++#define ESPI_CH0_PC_TX_CTRL_TRIG_PEND BIT(31) ++#define ESPI_CH0_PC_TX_CTRL_LEN GENMASK(23, 12) ++#define ESPI_CH0_PC_TX_CTRL_TAG GENMASK(11, 8) ++#define ESPI_CH0_PC_TX_CTRL_CYC GENMASK(7, 0) ++#define ESPI_CH0_PC_TX_DATA 0x12c ++#define ESPI_CH0_NP_TX_DMAL 0x130 ++#define ESPI_CH0_NP_TX_DMAH 0x134 ++#define ESPI_CH0_NP_TX_CTRL 0x138 ++#define ESPI_CH0_NP_TX_CTRL_TRIG_PEND BIT(31) ++#define ESPI_CH0_NP_TX_CTRL_LEN GENMASK(23, 12) ++#define ESPI_CH0_NP_TX_CTRL_TAG GENMASK(11, 8) ++#define ESPI_CH0_NP_TX_CTRL_CYC GENMASK(7, 0) ++#define ESPI_CH0_NP_TX_DATA 0x13c ++#define ESPI_CH0_MCYC0_SADDRL 0x140 ++#define ESPI_CH0_MCYC0_SADDRH 0x144 ++#define ESPI_CH0_MCYC0_TADDRL 0x148 ++#define ESPI_CH0_MCYC0_TADDRH 0x14c ++#define ESPI_CH0_MCYC0_MASKL 0x150 ++#define ESPI_CH0_MCYC0_MASKL_EN BIT(0) ++#define ESPI_CH0_MCYC0_MASKH 0x154 ++#define ESPI_CH0_MCYC1_SADDRL 0x158 ++#define ESPI_CH0_MCYC1_SADDRH 0x15c ++#define ESPI_CH0_MCYC1_TADDRL 0x160 ++#define ESPI_CH0_MCYC1_TADDRH 0x164 ++#define ESPI_CH0_MCYC1_MASKL 0x168 ++#define ESPI_CH0_MCYC1_MASKL_EN BIT(0) ++#define ESPI_CH0_MCYC1_MASKH 0x16c ++#define ESPI_CH0_WPROT0 0x1f8 ++#define ESPI_CH0_WPROT1 0x1fc ++ ++/* virtual wire channel (ch1) registers */ ++#define ESPI_CH1_CTRL 0x200 ++#define ESPI_CH1_CTRL_GPIO_HW BIT(9) ++#define ESPI_CH1_CTRL_SW_RDY BIT(1) ++#define ESPI_CH1_STS 0x204 ++#define ESPI_CH1_INT_STS 0x208 ++#define ESPI_CH1_INT_STS_GPIO BIT(2) ++#define ESPI_CH1_INT_EN 0x20c ++#define ESPI_CH1_INT_EN_GPIO BIT(2) ++#define ESPI_CH1_EVT0 0x210 ++#define ESPI_CH1_EVT0_INT_EN 0x214 ++#define ESPI_CH1_EVT0_INT_T0 0x218 ++#define ESPI_CH1_EVT0_INT_T1 0x21c ++#define ESPI_CH1_EVT0_INT_T2 0x220 ++#define ESPI_CH1_EVT0_INT_STS 0x224 ++#define ESPI_CH1_EVT1 0x230 ++#define ESPI_CH1_EVT1_INT_EN 0x234 ++#define ESPI_CH1_EVT1_INT_T0 0x238 ++#define ESPI_CH1_EVT1_INT_T1 0x23c ++#define ESPI_CH1_EVT1_INT_T2 0x240 ++#define ESPI_CH1_EVT1_INT_STS 0x244 ++#define ESPI_CH1_GPIO_VAL0 0x250 ++#define ESPI_CH1_GPIO_VAL1 0x254 ++#define ESPI_CH1_GPIO_DIR0 0x258 ++#define ESPI_CH1_GPIO_DIR1 0x25c ++#define ESPI_CH1_GPIO_RSTSEL0 0x260 ++#define ESPI_CH1_GPIO_RSTSEL1 0x264 ++#define ESPI_CH1_GPIO_GRP 0x268 ++#define ESPI_CH1_GP50_DIR0 0x270 ++#define ESPI_CH1_GP50_DIR1 0x274 ++#define ESPI_CH1_GP50_VAL0 0x278 ++#define ESPI_CH1_GP50_VAL1 0x27c ++#define ESPI_CH1_SW_INT 0x280 ++#define ESPI_CH1_INT_RSTSEL0 0x284 ++#define ESPI_CH1_INT_RSTSEL1 0x288 ++#define ESPI_CH1_WPROT0 0x2f8 ++#define ESPI_CH1_WPROT1 0x2fc ++ ++/* out-of-band channel (ch2) registers */ ++#define ESPI_CH2_CTRL 0x300 ++#define ESPI_CH2_CTRL_TX_RST BIT(31) ++#define ESPI_CH2_CTRL_RX_RST BIT(30) ++#define ESPI_CH2_CTRL_TX_DMA_EN BIT(17) ++#define ESPI_CH2_CTRL_RX_DMA_EN BIT(16) ++#define ESPI_CH2_CTRL_SW_RDY BIT(4) ++#define ESPI_CH2_STS 0x304 ++#define ESPI_CH2_INT_STS 0x308 ++#define ESPI_CH2_INT_STS_RX_CMPLT BIT(0) ++#define ESPI_CH2_INT_EN 0x30c ++#define ESPI_CH2_INT_EN_RX_CMPLT BIT(0) ++#define ESPI_CH2_RX_DMAL 0x310 ++#define ESPI_CH2_RX_DMAH 0x314 ++#define ESPI_CH2_RX_CTRL 0x318 ++#define ESPI_CH2_RX_CTRL_SERV_PEND BIT(31) ++#define 
ESPI_CH2_RX_CTRL_PEC BIT(24) ++#define ESPI_CH2_RX_CTRL_LEN GENMASK(23, 12) ++#define ESPI_CH2_RX_CTRL_TAG GENMASK(11, 8) ++#define ESPI_CH2_RX_CTRL_CYC GENMASK(7, 0) ++#define ESPI_CH2_RX_DATA 0x31c ++#define ESPI_CH2_TX_DMAL 0x320 ++#define ESPI_CH2_TX_DMAH 0x324 ++#define ESPI_CH2_TX_CTRL 0x328 ++#define ESPI_CH2_TX_CTRL_TRIG_PEND BIT(31) ++#define ESPI_CH2_TX_CTRL_PEC BIT(24) ++#define ESPI_CH2_TX_CTRL_LEN GENMASK(23, 12) ++#define ESPI_CH2_TX_CTRL_TAG GENMASK(11, 8) ++#define ESPI_CH2_TX_CTRL_CYC GENMASK(7, 0) ++#define ESPI_CH2_TX_DATA 0x32c ++#define ESPI_CH2_RX_DESC_EPTR 0x330 ++#define ESPI_CH2_RX_DESC_RPTR 0x334 ++#define ESPI_CH2_RX_DESC_RPTR_UPDATE BIT(31) ++#define ESPI_CH2_RX_DESC_RPTR_RP GENMASK(11, 0) ++#define ESPI_CH2_RX_DESC_WPTR 0x338 ++#define ESPI_CH2_RX_DESC_WPTR_VALID BIT(31) ++#define ESPI_CH2_RX_DESC_WPTR_SP GENMASK(27, 16) ++#define ESPI_CH2_RX_DESC_WPTR_WP GENMASK(11, 0) ++#define ESPI_CH2_RX_DESC_TMOUT 0x33c ++#define ESPI_CH2_TX_DESC_EPTR 0x340 ++#define ESPI_CH2_TX_DESC_RPTR 0x344 ++#define ESPI_CH2_TX_DESC_RPTR_UPT BIT(31) ++#define ESPI_CH2_TX_DESC_WPTR 0x348 ++#define ESPI_CH2_TX_DESC_WPTR_VALID BIT(31) ++#define ESPI_CH2_WPROT0 0x3f8 ++#define ESPI_CH2_WPROT1 0x3fc ++ ++/* flash channel (ch3) registers */ ++#define ESPI_CH3_CTRL 0x400 ++#define ESPI_CH3_CTRL_TX_RST BIT(31) ++#define ESPI_CH3_CTRL_RX_RST BIT(30) ++#define ESPI_CH3_CTRL_TX_DMA_EN BIT(17) ++#define ESPI_CH3_CTRL_RX_DMA_EN BIT(16) ++#define ESPI_CH3_CTRL_EDAF_MODE GENMASK(9, 8) ++#define ESPI_CH3_CTRL_SW_RDY BIT(5) ++#define ESPI_CH3_STS 0x404 ++#define ESPI_CH3_INT_STS 0x408 ++#define ESPI_CH3_INT_STS_RX_CMPLT BIT(0) ++#define ESPI_CH3_INT_EN 0x40c ++#define ESPI_CH3_INT_EN_RX_CMPLT BIT(0) ++#define ESPI_CH3_RX_DMAL 0x410 ++#define ESPI_CH3_RX_DMAH 0x414 ++#define ESPI_CH3_RX_CTRL 0x418 ++#define ESPI_CH3_RX_CTRL_SERV_PEND BIT(31) ++#define ESPI_CH3_RX_CTRL_LEN GENMASK(23, 12) ++#define ESPI_CH3_RX_CTRL_TAG GENMASK(11, 8) ++#define ESPI_CH3_RX_CTRL_CYC GENMASK(7, 0) ++#define ESPI_CH3_RX_DATA 0x41c ++#define ESPI_CH3_TX_DMAL 0x420 ++#define ESPI_CH3_TX_DMAH 0x424 ++#define ESPI_CH3_TX_CTRL 0x428 ++#define ESPI_CH3_TX_CTRL_TRIG_PEND BIT(31) ++#define ESPI_CH3_TX_CTRL_LEN GENMASK(23, 12) ++#define ESPI_CH3_TX_CTRL_TAG GENMASK(11, 8) ++#define ESPI_CH3_TX_CTRL_CYC GENMASK(7, 0) ++#define ESPI_CH3_TX_DATA 0x42c ++#define ESPI_CH3_EDAF_TADDRL 0x430 ++#define ESPI_CH3_EDAF_TADDRH 0x434 ++#define ESPI_CH3_EDAF_MASKL 0x438 ++#define ESPI_CH3_EDAF_MASKH 0x43c ++#define ESPI_CH3_WPROT0 0x4f8 ++#define ESPI_CH3_WPROT1 0x4fc ++ ++/* eDAF filter registers */ ++#define ESPI_EDAF_FLTR_SADDR0 0x510 ++#define ESPI_EDAF_FLTR_EADDR0 0x514 ++#define ESPI_EDAF_FLTR_SADDR1 0x518 ++#define ESPI_EDAF_FLTR_EADDR1 0x51c ++#define ESPI_EDAF_FLTR_SADDR2 0x520 ++#define ESPI_EDAF_FLTR_EADDR2 0x524 ++#define ESPI_EDAF_FLTR_SADDR3 0x528 ++#define ESPI_EDAF_FLTR_EADDR3 0x52c ++#define ESPI_EDAF_FLTR_SADDR4 0x530 ++#define ESPI_EDAF_FLTR_EADDR4 0x534 ++#define ESPI_EDAF_FLTR_SADDR5 0x538 ++#define ESPI_EDAF_FLTR_EADDR5 0x53c ++#define ESPI_EDAF_FLTR_SADDR6 0x540 ++#define ESPI_EDAF_FLTR_EADDR6 0x544 ++#define ESPI_EDAF_FLTR_SADDR7 0x548 ++#define ESPI_EDAF_FLTR_EADDR7 0x54c ++#define ESPI_EDAF_FLTR_SADDR8 0x550 ++#define ESPI_EDAF_FLTR_EADDR8 0x554 ++#define ESPI_EDAF_FLTR_SADDR9 0x558 ++#define ESPI_EDAF_FLTR_EADDR9 0x55c ++#define ESPI_EDAF_FLTR_SADDR10 0x560 ++#define ESPI_EDAF_FLTR_EADDR10 0x564 ++#define ESPI_EDAF_FLTR_SADDR11 0x568 ++#define ESPI_EDAF_FLTR_EADDR11 0x56c ++#define ESPI_EDAF_FLTR_SADDR12 0x570 ++#define 
ESPI_EDAF_FLTR_EADDR12 0x574 ++#define ESPI_EDAF_FLTR_SADDR13 0x578 ++#define ESPI_EDAF_FLTR_EADDR13 0x57c ++#define ESPI_EDAF_FLTR_SADDR14 0x580 ++#define ESPI_EDAF_FLTR_EADDR14 0x584 ++#define ESPI_EDAF_FLTR_SADDR15 0x588 ++#define ESPI_EDAF_FLTR_EADDR15 0x58c ++#define ESPI_EDAF_WPROT0 0x5f8 ++#define ESPI_EDAF_WPROT1 0x5fc ++ ++/* MMBI registers */ ++#define ESPI_MMBI_CTRL 0x800 ++#define ESPI_MMBI_CTRL_INST_NUM GENMASK(6, 4) ++#define ESPI_MMBI_CTRL_EN BIT(0) ++#define ESPI_MMBI_INT_STS 0x808 ++#define ESPI_MMBI_INT_EN 0x80c ++#define ESPI_MMBI_HOST_RWP(x) (0x810 + ((x) << 3)) ++ ++enum ast2700_edaf_mode { ++ EDAF_MODE_MIX, ++ EDAF_MODE_SW, ++ EDAF_MODE_HW, ++ EDAF_MODES, ++}; ++ ++#endif +diff --git a/drivers/soc/aspeed/ast2700-otp.c b/drivers/soc/aspeed/ast2700-otp.c +--- a/drivers/soc/aspeed/ast2700-otp.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/soc/aspeed/ast2700-otp.c 2025-12-23 10:16:21.127032619 +0000 +@@ -0,0 +1,565 @@ ++// SPDX-License-Identifier: GPL-2.0+ ++/* ++ * Copyright 2024 Aspeed Technology Inc. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++static DEFINE_SPINLOCK(otp_state_lock); ++ ++/*********************** ++ * * ++ * OTP regs definition * ++ * * ++ ***********************/ ++#define OTP_REG_SIZE 0x200 ++ ++#define OTP_PASSWD 0x349fe38a ++#define OTP_CMD_READ 0x23b1e361 ++#define OTP_CMD_PROG 0x23b1e364 ++#define OTP_CMD_PROG_MULTI 0x23b1e365 ++#define OTP_CMD_CMP 0x23b1e363 ++#define OTP_CMD_BIST 0x23b1e368 ++ ++#define OTP_CMD_OFFSET 0x20 ++#define OTP_MASTER OTP_M0 ++ ++#define OTP_KEY 0x0 ++#define OTP_CMD (OTP_MASTER * OTP_CMD_OFFSET + 0x4) ++#define OTP_WDATA_0 (OTP_MASTER * OTP_CMD_OFFSET + 0x8) ++#define OTP_WDATA_1 (OTP_MASTER * OTP_CMD_OFFSET + 0xc) ++#define OTP_WDATA_2 (OTP_MASTER * OTP_CMD_OFFSET + 0x10) ++#define OTP_WDATA_3 (OTP_MASTER * OTP_CMD_OFFSET + 0x14) ++#define OTP_STATUS (OTP_MASTER * OTP_CMD_OFFSET + 0x18) ++#define OTP_ADDR (OTP_MASTER * OTP_CMD_OFFSET + 0x1c) ++#define OTP_RDATA (OTP_MASTER * OTP_CMD_OFFSET + 0x20) ++ ++#define OTP_DBG00 0x0C4 ++#define OTP_DBG01 0x0C8 ++#define OTP_MASTER_PID 0x0D0 ++#define OTP_ECC_EN 0x0D4 ++#define OTP_CMD_LOCK 0x0D8 ++#define OTP_SW_RST 0x0DC ++#define OTP_SLV_ID 0x0E0 ++#define OTP_PMC_CQ 0x0E4 ++#define OTP_FPGA 0x0EC ++#define OTP_CLR_FPGA 0x0F0 ++#define OTP_REGION_ROM_PATCH 0x100 ++#define OTP_REGION_OTPCFG 0x104 ++#define OTP_REGION_OTPSTRAP 0x108 ++#define OTP_REGION_OTPSTRAP_EXT 0x10C ++#define OTP_REGION_SECURE0 0x120 ++#define OTP_REGION_SECURE0_RANGE 0x124 ++#define OTP_REGION_SECURE1 0x128 ++#define OTP_REGION_SECURE1_RANGE 0x12C ++#define OTP_REGION_SECURE2 0x130 ++#define OTP_REGION_SECURE2_RANGE 0x134 ++#define OTP_REGION_SECURE3 0x138 ++#define OTP_REGION_SECURE3_RANGE 0x13C ++#define OTP_REGION_USR0 0x140 ++#define OTP_REGION_USR0_RANGE 0x144 ++#define OTP_REGION_USR1 0x148 ++#define OTP_REGION_USR1_RANGE 0x14C ++#define OTP_REGION_USR2 0x150 ++#define OTP_REGION_USR2_RANGE 0x154 ++#define OTP_REGION_USR3 0x158 ++#define OTP_REGION_USR3_RANGE 0x15C ++#define OTP_REGION_CALIPTRA_0 0x160 ++#define OTP_REGION_CALIPTRA_0_RANGE 0x164 ++#define OTP_REGION_CALIPTRA_1 0x168 ++#define OTP_REGION_CALIPTRA_1_RANGE 0x16C ++#define OTP_REGION_CALIPTRA_2 0x170 ++#define OTP_REGION_CALIPTRA_2_RANGE 0x174 ++#define OTP_REGION_CALIPTRA_3 0x178 ++#define OTP_REGION_CALIPTRA_3_RANGE 0x17C ++#define OTP_RBP_SOC_SVN 0x180 ++#define OTP_RBP_SOC_KEYRETIRE 0x184 ++#define 
OTP_RBP_CALIP_SVN 0x188 ++#define OTP_RBP_CALIP_KEYRETIRE 0x18C ++#define OTP_PUF 0x1A0 ++#define OTP_MASTER_ID 0x1B0 ++#define OTP_MASTER_ID_EXT 0x1B4 ++#define OTP_R_MASTER_ID 0x1B8 ++#define OTP_R_MASTER_ID_EXT 0x1BC ++#define OTP_SOC_ECCKEY 0x1C0 ++#define OTP_SEC_BOOT_EN 0x1C4 ++#define OTP_SOC_KEY 0x1C8 ++#define OTP_CALPITRA_MANU_KEY 0x1CC ++#define OTP_CALPITRA_OWNER_KEY 0x1D0 ++#define OTP_FW_ID_LSB 0x1D4 ++#define OTP_FW_ID_MSB 0x1D8 ++#define OTP_CALIP_FMC_SVN 0x1DC ++#define OTP_CALIP_RUNTIME_SVN0 0x1E0 ++#define OTP_CALIP_RUNTIME_SVN1 0x1E4 ++#define OTP_CALIP_RUNTIME_SVN2 0x1E8 ++#define OTP_CALIP_RUNTIME_SVN3 0x1EC ++#define OTP_SVN_WLOCK 0x1F0 ++#define OTP_INTR_EN 0x200 ++#define OTP_INTR_STS 0x204 ++#define OTP_INTR_MID 0x208 ++#define OTP_INTR_FUNC_INFO 0x20C ++#define OTP_INTR_M_INFO 0x210 ++#define OTP_INTR_R_INFO 0x214 ++ ++#define OTP_PMC 0x400 ++#define OTP_DAP 0x500 ++ ++/* OTP status: [0] */ ++#define OTP_STS_IDLE 0x0 ++#define OTP_STS_BUSY 0x1 ++ ++/* OTP cmd status: [7:4] */ ++#define OTP_GET_CMD_STS(x) (((x) & 0xF0) >> 4) ++#define OTP_STS_PASS 0x0 ++#define OTP_STS_FAIL 0x1 ++#define OTP_STS_CMP_FAIL 0x2 ++#define OTP_STS_REGION_FAIL 0x3 ++#define OTP_STS_MASTER_FAIL 0x4 ++ ++/* OTP ECC EN */ ++#define ECC_ENABLE 0x1 ++#define ECC_DISABLE 0x0 ++#define ECCBRP_EN BIT(0) ++ ++#define ROM_REGION_START_ADDR 0x0 ++#define ROM_REGION_END_ADDR 0x3e0 ++#define RBP_REGION_START_ADDR ROM_REGION_END_ADDR ++#define RBP_REGION_END_ADDR 0x400 ++#define CONF_REGION_START_ADDR RBP_REGION_END_ADDR ++#define CONF_REGION_END_ADDR 0x420 ++#define STRAP_REGION_START_ADDR CONF_REGION_END_ADDR ++#define STRAP_REGION_END_ADDR 0x430 ++#define STRAPEXT_REGION_START_ADDR STRAP_REGION_END_ADDR ++#define STRAPEXT_REGION_END_ADDR 0x440 ++#define USER_REGION_START_ADDR STRAPEXT_REGION_END_ADDR ++#define USER_REGION_END_ADDR 0x1000 ++#define SEC_REGION_START_ADDR USER_REGION_END_ADDR ++#define SEC_REGION_END_ADDR 0x1c00 ++#define CAL_REGION_START_ADDR SEC_REGION_END_ADDR ++#define CAL_REGION_END_ADDR 0x1f80 ++#define SW_PUF_REGION_START_ADDR CAL_REGION_END_ADDR ++#define SW_PUF_REGION_END_ADDR 0x1fc0 ++#define HW_PUF_REGION_START_ADDR SW_PUF_REGION_END_ADDR ++#define HW_PUF_REGION_END_ADDR 0x2000 ++ ++#define OTP_MEMORY_SIZE (HW_PUF_REGION_END_ADDR * 2) ++ ++#define OTP_TIMEOUT_US 10000 ++ ++/* OTPSTRAP */ ++#define OTPSTRAP0_ADDR STRAP_REGION_START_ADDR ++#define OTPSTRAP14_ADDR (OTPSTRAP0_ADDR + 0xe) ++ ++#define OTPTOOL_VERSION(a, b, c) (((a) << 24) + ((b) << 12) + (c)) ++#define OTPTOOL_VERSION_MAJOR(x) (((x) >> 24) & 0xff) ++#define OTPTOOL_VERSION_PATCHLEVEL(x) (((x) >> 12) & 0xfff) ++#define OTPTOOL_VERSION_SUBLEVEL(x) ((x) & 0xfff) ++#define OTPTOOL_COMPT_VERSION 2 ++ ++enum otp_error_code { ++ OTP_SUCCESS, ++ OTP_READ_FAIL, ++ OTP_PROG_FAIL, ++ OTP_CMP_FAIL, ++}; ++ ++enum aspeed_otp_master_id { ++ OTP_M0 = 0, ++ OTP_M1, ++ OTP_M2, ++ OTP_M3, ++ OTP_M4, ++ OTP_M5, ++ OTP_MID_MAX, ++}; ++ ++struct aspeed_otp { ++ struct miscdevice miscdev; ++ struct device *dev; ++ void __iomem *base; ++ u32 chip_revid0; ++ u32 chip_revid1; ++ bool is_open; ++ int gbl_ecc_en; ++ u8 *data; ++}; ++ ++enum otp_ioctl_cmds { ++ GET_ECC_STATUS = 1, ++ SET_ECC_ENABLE, ++}; ++ ++enum otp_ecc_codes { ++ OTP_ECC_MISMATCH = -1, ++ OTP_ECC_DISABLE = 0, ++ OTP_ECC_ENABLE = 1, ++}; ++ ++static void otp_unlock(struct device *dev) ++{ ++ struct aspeed_otp *ctx = dev_get_drvdata(dev); ++ ++ writel(OTP_PASSWD, ctx->base + OTP_KEY); ++} ++ ++static void otp_lock(struct device *dev) ++{ ++ struct aspeed_otp *ctx = 
dev_get_drvdata(dev); ++ ++ writel(0x1, ctx->base + OTP_KEY); ++} ++ ++static int wait_complete(struct device *dev) ++{ ++ struct aspeed_otp *ctx = dev_get_drvdata(dev); ++ int ret; ++ u32 val; ++ ++ ret = readl_poll_timeout(ctx->base + OTP_STATUS, val, (val == 0x0), ++ 1, OTP_TIMEOUT_US); ++ if (ret) ++ dev_warn(dev, "timeout. sts:0x%x\n", val); ++ ++ return ret; ++} ++ ++static int otp_read_data(struct aspeed_otp *ctx, u32 offset, u16 *data) ++{ ++ struct device *dev = ctx->dev; ++ int ret; ++ ++ writel(ctx->gbl_ecc_en, ctx->base + OTP_ECC_EN); ++ writel(offset, ctx->base + OTP_ADDR); ++ writel(OTP_CMD_READ, ctx->base + OTP_CMD); ++ ret = wait_complete(dev); ++ if (ret) ++ return OTP_READ_FAIL; ++ ++ data[0] = readl(ctx->base + OTP_RDATA); ++ ++ return 0; ++} ++ ++static int otp_prog_data(struct aspeed_otp *ctx, u32 offset, u16 data) ++{ ++ struct device *dev = ctx->dev; ++ int ret; ++ ++ writel(ctx->gbl_ecc_en, ctx->base + OTP_ECC_EN); ++ writel(offset, ctx->base + OTP_ADDR); ++ writel(data, ctx->base + OTP_WDATA_0); ++ writel(OTP_CMD_PROG, ctx->base + OTP_CMD); ++ ret = wait_complete(dev); ++ if (ret) ++ return OTP_PROG_FAIL; ++ ++ return 0; ++} ++ ++static int otp_prog_multi_data(struct aspeed_otp *ctx, u32 offset, u32 *data, int count) ++{ ++ struct device *dev = ctx->dev; ++ int ret; ++ ++ writel(ctx->gbl_ecc_en, ctx->base + OTP_ECC_EN); ++ writel(offset, ctx->base + OTP_ADDR); ++ for (int i = 0; i < count; i++) ++ writel(data[i], ctx->base + OTP_WDATA_0 + 4 * i); ++ ++ writel(OTP_CMD_PROG_MULTI, ctx->base + OTP_CMD); ++ ret = wait_complete(dev); ++ if (ret) ++ return OTP_PROG_FAIL; ++ ++ return 0; ++} ++ ++static int aspeed_otp_read(struct aspeed_otp *ctx, int offset, ++ void *buf, int size) ++{ ++ struct device *dev = ctx->dev; ++ u16 *data = buf; ++ int ret; ++ ++ otp_unlock(dev); ++ for (int i = 0; i < size; i++) { ++ ret = otp_read_data(ctx, offset + i, data + i); ++ if (ret) { ++ dev_warn(ctx->dev, "read failed\n"); ++ break; ++ } ++ } ++ ++ otp_lock(dev); ++ return ret; ++} ++ ++static int aspeed_otp_write(struct aspeed_otp *ctx, int offset, ++ const void *buf, int size) ++{ ++ struct device *dev = ctx->dev; ++ u32 *data32 = (u32 *)buf; ++ u16 *data = (u16 *)buf; ++ int ret; ++ ++ otp_unlock(dev); ++ ++ if (size == 1) ++ ret = otp_prog_data(ctx, offset, data[0]); ++ else ++ ret = otp_prog_multi_data(ctx, offset, data32, size / 2); ++ ++ if (ret) ++ dev_warn(ctx->dev, "prog failed\n"); ++ ++ otp_lock(dev); ++ return ret; ++} ++ ++static int aspeed_otp_ecc_en(struct aspeed_otp *ctx) ++{ ++ struct device *dev = ctx->dev; ++ int ret = 0; ++ ++ /* Check ecc is already enabled */ ++ if (ctx->gbl_ecc_en == 1) ++ return 0; ++ ++ otp_unlock(dev); ++ ++ /* enable cfg ecc */ ++ ret = otp_prog_data(ctx, OTPSTRAP14_ADDR, 0x1); ++ if (ret) { ++ dev_warn(dev, "%s: prog failed\n", __func__); ++ goto end; ++ } ++ ++ ctx->gbl_ecc_en = 1; ++end: ++ otp_lock(dev); ++ ++ return ret; ++} ++ ++static long aspeed_otp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) ++{ ++ struct miscdevice *c = file->private_data; ++ struct aspeed_otp *ctx = container_of(c, struct aspeed_otp, miscdev); ++ void __user *argp = (void __user *)arg; ++ struct otp_revid revid; ++ struct otp_read rdata; ++ struct otp_prog pdata; ++ int ret = 0; ++ ++ switch (cmd) { ++ case ASPEED_OTP_READ_DATA: ++ if (copy_from_user(&rdata, argp, sizeof(struct otp_read))) ++ return -EFAULT; ++ ++ ret = aspeed_otp_read(ctx, rdata.offset, ctx->data, rdata.len); ++ if (ret) ++ return -EFAULT; ++ ++ if (copy_to_user(rdata.data, 
ctx->data, rdata.len * 2)) ++ return -EFAULT; ++ ++ break; ++ ++ case ASPEED_OTP_PROG_DATA: ++ if (copy_from_user(&pdata, argp, sizeof(struct otp_prog))) ++ return -EFAULT; ++ ++ ret = aspeed_otp_write(ctx, pdata.w_offset, pdata.data, pdata.len); ++ break; ++ ++ case ASPEED_OTP_GET_ECC: ++ if (copy_to_user(argp, &ctx->gbl_ecc_en, sizeof(ctx->gbl_ecc_en))) ++ return -EFAULT; ++ break; ++ ++ case ASPEED_OTP_SET_ECC: ++ ret = aspeed_otp_ecc_en(ctx); ++ break; ++ ++ case ASPEED_OTP_GET_REVID: ++ revid.revid0 = ctx->chip_revid0; ++ revid.revid1 = ctx->chip_revid1; ++ if (copy_to_user(argp, &revid, sizeof(struct otp_revid))) ++ return -EFAULT; ++ break; ++ default: ++ dev_warn(ctx->dev, "cmd 0x%x is not supported\n", cmd); ++ break; ++ } ++ ++ return ret; ++} ++ ++static int aspeed_otp_ecc_init(struct device *dev) ++{ ++ struct aspeed_otp *ctx = dev_get_drvdata(dev); ++ int ret; ++ u32 val; ++ ++ otp_unlock(dev); ++ ++ /* Check cfg_ecc_en */ ++ writel(0, ctx->base + OTP_ECC_EN); ++ writel(OTPSTRAP14_ADDR, ctx->base + OTP_ADDR); ++ writel(OTP_CMD_READ, ctx->base + OTP_CMD); ++ ret = wait_complete(dev); ++ if (ret) ++ return OTP_READ_FAIL; ++ ++ val = readl(ctx->base + OTP_RDATA); ++ if (val & 0x1) ++ ctx->gbl_ecc_en = 0x1; ++ else ++ ctx->gbl_ecc_en = 0x0; ++ ++ otp_lock(dev); ++ ++ return 0; ++} ++ ++static int aspeed_otp_open(struct inode *inode, struct file *file) ++{ ++ struct miscdevice *c = file->private_data; ++ struct aspeed_otp *ctx = container_of(c, struct aspeed_otp, miscdev); ++ ++ spin_lock(&otp_state_lock); ++ ++ if (ctx->is_open) { ++ spin_unlock(&otp_state_lock); ++ return -EBUSY; ++ } ++ ++ ctx->is_open = true; ++ ++ spin_unlock(&otp_state_lock); ++ ++ return 0; ++} ++ ++static int aspeed_otp_release(struct inode *inode, struct file *file) ++{ ++ struct miscdevice *c = file->private_data; ++ struct aspeed_otp *ctx = container_of(c, struct aspeed_otp, miscdev); ++ ++ spin_lock(&otp_state_lock); ++ ++ ctx->is_open = false; ++ ++ spin_unlock(&otp_state_lock); ++ ++ return 0; ++} ++ ++static const struct file_operations otp_fops = { ++ .owner = THIS_MODULE, ++ .unlocked_ioctl = aspeed_otp_ioctl, ++ .open = aspeed_otp_open, ++ .release = aspeed_otp_release, ++}; ++ ++static const struct of_device_id aspeed_otp_of_matches[] = { ++ { .compatible = "aspeed,ast2700-otp" }, ++ { } ++}; ++MODULE_DEVICE_TABLE(of, aspeed_otp_of_matches); ++ ++static int aspeed_otp_probe(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ struct regmap *scu0, *scu1; ++ struct aspeed_otp *priv; ++ struct resource *res; ++ int rc; ++ ++ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); ++ if (!priv) ++ return -ENOMEM; ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (!res) { ++ dev_err(&pdev->dev, "cannot get IORESOURCE_MEM\n"); ++ return -ENOENT; ++ } ++ ++ priv->base = devm_ioremap_resource(&pdev->dev, res); ++ if (IS_ERR(priv->base)) ++ return PTR_ERR(priv->base); ++ ++ scu0 = syscon_regmap_lookup_by_phandle(dev->of_node, "aspeed,scu0"); ++ scu1 = syscon_regmap_lookup_by_phandle(dev->of_node, "aspeed,scu1"); ++ if (IS_ERR(scu0) || IS_ERR(scu1)) { ++ dev_err(dev, "failed to find SCU regmap\n"); ++ return IS_ERR(scu0) ? PTR_ERR(scu0) : PTR_ERR(scu1); ++ } ++ ++ regmap_read(scu0, 0x0, &priv->chip_revid0); ++ regmap_read(scu1, 0x0, &priv->chip_revid1); ++ ++ priv->dev = dev; ++ dev_set_drvdata(dev, priv); ++ ++ /* OTP ECC init */ ++ rc = aspeed_otp_ecc_init(dev); ++ if (rc) ++ return -EIO; ++ ++ priv->data = kmalloc(OTP_MEMORY_SIZE, GFP_KERNEL); ++ if (!priv->data) ++ return -ENOMEM; ++ ++ /* Set up the 
miscdevice */ ++ priv->miscdev.minor = MISC_DYNAMIC_MINOR; ++ priv->miscdev.name = "aspeed-otp"; ++ priv->miscdev.fops = &otp_fops; ++ ++ /* Register the device */ ++ rc = misc_register(&priv->miscdev); ++ if (rc) { ++ dev_err(dev, "Unable to register device\n"); ++ return rc; ++ } ++ ++ dev_info(dev, "Aspeed OTP driver successfully registered\n"); ++ ++ return 0; ++} ++ ++static void aspeed_otp_remove(struct platform_device *pdev) ++{ ++ struct aspeed_otp *ctx = dev_get_drvdata(&pdev->dev); ++ ++ kfree(ctx->data); ++ misc_deregister(&ctx->miscdev); ++} ++ ++static struct platform_driver aspeed_otp_driver = { ++ .probe = aspeed_otp_probe, ++ .remove = aspeed_otp_remove, ++ .driver = { ++ .name = KBUILD_MODNAME, ++ .of_match_table = aspeed_otp_of_matches, ++ }, ++}; ++ ++module_platform_driver(aspeed_otp_driver); ++ ++MODULE_AUTHOR("Neal Liu "); ++MODULE_LICENSE("GPL"); ++MODULE_DESCRIPTION("ASPEED OTP Driver"); +diff --git a/drivers/soc/aspeed/ast2700-rtc-over-espi.c b/drivers/soc/aspeed/ast2700-rtc-over-espi.c +--- a/drivers/soc/aspeed/ast2700-rtc-over-espi.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/soc/aspeed/ast2700-rtc-over-espi.c 2025-12-23 10:16:21.127032619 +0000 +@@ -0,0 +1,193 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright 2025 Aspeed Technology Inc. ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define SYNC_INTERVAL_MS_DEFAULT 1000 ++ ++#define RTC_REG0 0x0 ++#define RTC_REG0_MINUTES_ALARM GENMASK(31, 24) ++#define RTC_REG0_MINUTES GENMASK(23, 16) ++#define RTC_REG0_SECONDS_ALARM GENMASK(15, 8) ++#define RTC_REG0_SECONDS GENMASK(7, 0) ++#define RTC_REG1 0x4 ++#define RTC_REG1_DAY_OF_MONTH GENMASK(31, 24) ++#define RTC_REG1_DAY_OF_WEEK GENMASK(23, 16) ++#define RTC_REG1_HOURS_ALARM GENMASK(15, 8) ++#define RTC_REG1_HOURS GENMASK(7, 0) ++#define RTC_REG2 0x8 ++#define RTC_REG2_YEAR GENMASK(15, 8) ++#define RTC_REG2_MONTH GENMASK(7, 0) ++#define RTC_REG_MAX 0xc ++ ++struct rtc_espi_sync { ++ struct rtc_device *rtc_dev; ++ struct device *dev; ++ void __iomem *espi_ram_base; ++ u32 interval_ms; ++ struct delayed_work sync_work; ++ struct workqueue_struct *wq; ++}; ++ ++static void sync_rtc_to_espi_work(struct work_struct *work) ++{ ++ struct rtc_espi_sync *ctx = container_of(to_delayed_work(work), struct rtc_espi_sync, sync_work); ++ struct rtc_time tm; ++ u32 reg, sec, min, hour, day_of_week, day_of_month, month, year; ++ u32 sec_alarm, min_alarm, hour_alarm; ++ int ret; ++ ++ if (!ctx->espi_ram_base) { ++ dev_err(ctx->dev, "eSPI RAM base not mapped\n"); ++ return; ++ } ++ ++ ret = rtc_read_time(ctx->rtc_dev, &tm); ++ if (ret < 0) { ++ dev_err(ctx->dev, "Failed to read RTC time: %d\n", ret); ++ goto reschedule; ++ } ++ dev_dbg(ctx->dev, "%s: tm data: secs=%d, mins=%d, hours=%d, mon=%d, year=%d\n", ++ __func__, tm.tm_sec, tm.tm_min, tm.tm_hour, tm.tm_mon, tm.tm_year); ++ dev_dbg(ctx->dev, "%s: tm data: wday(day of week)=%d, mday(day of month)=%d\n", ++ __func__, tm.tm_wday, tm.tm_mday); ++ ++ /* Write the eSPI RAM registers */ ++ reg = FIELD_PREP(RTC_REG0_SECONDS, tm.tm_sec) | ++ FIELD_PREP(RTC_REG0_MINUTES, tm.tm_min); ++ writel(reg, (ctx->espi_ram_base + RTC_REG0)); ++ ++ reg = FIELD_PREP(RTC_REG1_HOURS, tm.tm_hour) | ++ FIELD_PREP(RTC_REG1_DAY_OF_MONTH, tm.tm_mday) | ++ FIELD_PREP(RTC_REG1_DAY_OF_WEEK, tm.tm_wday + 1); ++ writel(reg, ctx->espi_ram_base + RTC_REG1); ++ ++ reg = FIELD_PREP(RTC_REG2_MONTH, tm.tm_mon + 1) | ++ FIELD_PREP(RTC_REG2_YEAR, tm.tm_year % 100); ++ 
writel(reg, ctx->espi_ram_base + RTC_REG2); ++ ++ /* Read back the values to verify */ ++ reg = readl(ctx->espi_ram_base + RTC_REG0); ++ sec = FIELD_GET(RTC_REG0_SECONDS, reg); ++ sec_alarm = FIELD_GET(RTC_REG0_SECONDS_ALARM, reg); ++ min = FIELD_GET(RTC_REG0_MINUTES, reg); ++ min_alarm = FIELD_GET(RTC_REG0_MINUTES_ALARM, reg); ++ ++ reg = readl(ctx->espi_ram_base + RTC_REG1); ++ hour = FIELD_GET(RTC_REG1_HOURS, reg); ++ hour_alarm = FIELD_GET(RTC_REG1_HOURS_ALARM, reg); ++ day_of_week = FIELD_GET(RTC_REG1_DAY_OF_WEEK, reg); ++ day_of_month = FIELD_GET(RTC_REG1_DAY_OF_MONTH, reg); ++ ++ reg = readl(ctx->espi_ram_base + RTC_REG2); ++ month = FIELD_GET(RTC_REG2_MONTH, reg); ++ year = FIELD_GET(RTC_REG2_YEAR, reg); ++ ++ dev_dbg(ctx->dev, "%s: eSPI RAM data: secs=%d, mins=%d, hours=%d, ", ++ __func__, sec, min, hour); ++ dev_dbg(ctx->dev, "day_of_week=%d, day_of_month=%d, month=%d, year=%d\n", ++ day_of_week, day_of_month, month, year); ++ dev_dbg(ctx->dev, "%s: eSPI RAM data: secs_alarm=%d, mins_alarm=%d, hours_alarm=%d\n", ++ __func__, sec_alarm, min_alarm, hour_alarm); ++ ++reschedule: ++ queue_delayed_work(ctx->wq, &ctx->sync_work, msecs_to_jiffies(ctx->interval_ms)); ++} ++ ++static int rtc_espi_sync_probe(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ struct rtc_espi_sync *ctx; ++ struct device_node *np = dev->of_node; ++ struct rtc_device *rtc; ++ struct resource *res; ++ u32 interval = SYNC_INTERVAL_MS_DEFAULT; ++ ++ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); ++ if (!ctx) ++ return -ENOMEM; ++ ++ /* Open the system RTC; defer until it has registered */ ++ rtc = rtc_class_open("rtc0"); ++ if (!rtc) { ++ dev_info(dev, "RTC not ready, deferring probe\n"); ++ return -EPROBE_DEFER; ++ } ++ ++ /* Check if the RTC device supports the read_time operation */ ++ if (!rtc->ops || !rtc->ops->read_time) { ++ dev_err(dev, "RTC device does not support read_time operation\n"); ++ rtc_class_close(rtc); ++ return -ENODEV; ++ } ++ ++ ctx->rtc_dev = rtc; ++ ctx->dev = dev; ++ ++ /* Get eSPI RAM handle and map */ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ ctx->espi_ram_base = devm_ioremap_resource(dev, res); ++ if (IS_ERR(ctx->espi_ram_base)) ++ return PTR_ERR(ctx->espi_ram_base); ++ ++ /* Clear the shared RTC registers before the first sync */ ++ memset_io(ctx->espi_ram_base, 0, RTC_REG_MAX); ++ ++ /* Optional: polling interval */ ++ of_property_read_u32(np, "interval-ms", &interval); ++ ctx->interval_ms = interval; ++ ++ ctx->wq = alloc_workqueue("rtc_espi_sync_wq", WQ_UNBOUND, 0); ++ if (!ctx->wq) ++ return -ENOMEM; ++ ++ INIT_DELAYED_WORK(&ctx->sync_work, sync_rtc_to_espi_work); ++ queue_delayed_work(ctx->wq, &ctx->sync_work, 0); ++ ++ dev_info(dev, "RTC-eSPI RAM sync initialized, every %u ms\n", ctx->interval_ms); ++ ++ platform_set_drvdata(pdev, ctx); ++ return 0; ++} ++ ++static void rtc_espi_sync_remove(struct platform_device *pdev) ++{ ++ struct rtc_espi_sync *ctx = platform_get_drvdata(pdev); ++ ++ cancel_delayed_work_sync(&ctx->sync_work); ++ destroy_workqueue(ctx->wq); ++ ++ if (ctx->rtc_dev) ++ rtc_class_close(ctx->rtc_dev); ++} ++ ++static const struct of_device_id rtc_espi_sync_of_match[] = { ++ { .compatible = "aspeed,ast2700-rtc-over-espi" }, ++ { } ++}; ++MODULE_DEVICE_TABLE(of, rtc_espi_sync_of_match); ++ ++static struct platform_driver rtc_espi_sync_driver = { ++ .driver = { ++ .name = "ast2700-rtc-over-espi", ++ .of_match_table = rtc_espi_sync_of_match, ++ }, ++ .probe = rtc_espi_sync_probe, ++ .remove = rtc_espi_sync_remove, ++}; ++module_platform_driver(rtc_espi_sync_driver); ++ 
++MODULE_LICENSE("GPL"); ++MODULE_AUTHOR("Kevin Chen "); ++MODULE_DESCRIPTION("RTC to eSPI RAM sync driver"); +diff --git a/drivers/soc/aspeed/rvas/Kconfig b/drivers/soc/aspeed/rvas/Kconfig +--- a/drivers/soc/aspeed/rvas/Kconfig 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/soc/aspeed/rvas/Kconfig 2025-12-23 10:16:21.127032619 +0000 +@@ -0,0 +1,9 @@ ++menu "ASPEED RVAS drivers" ++ ++config ASPEED_RVAS ++ tristate "ASPEED RVAS driver" ++ default n ++ help ++ Driver for ASPEED RVAS Engine ++ ++endmenu +diff --git a/drivers/soc/aspeed/rvas/Makefile b/drivers/soc/aspeed/rvas/Makefile +--- a/drivers/soc/aspeed/rvas/Makefile 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/soc/aspeed/rvas/Makefile 2025-12-23 10:16:21.127032619 +0000 +@@ -0,0 +1,3 @@ ++obj-$(CONFIG_ASPEED_RVAS) += rvas.o ++rvas-y := video_main.o hardware_engines.o video_engine.o ++ +diff --git a/drivers/soc/aspeed/rvas/hardware_engines.c b/drivers/soc/aspeed/rvas/hardware_engines.c +--- a/drivers/soc/aspeed/rvas/hardware_engines.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/soc/aspeed/rvas/hardware_engines.c 2025-12-23 10:16:21.127032619 +0000 +@@ -0,0 +1,2201 @@ ++// SPDX-License-Identifier: GPL-2.0+ ++/* ++ * File Name : hardware_engines.c ++ * Description : AST2600 frame grabber hardware engines ++ * ++ * Copyright (C) 2019-2021 ASPEED Technology Inc. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "hardware_engines.h" ++#include "video.h" ++#include "video_debug.h" ++ ++static u32 dwBucketSizeRegOffset[BSE_MAX_BUCKET_SIZE_REGS] = { 0x20, 0x24, 0x28, ++ 0x2c, 0x30, 0x34, 0x38, 0x3c, 0x40, 0x44, 0x48, 0x4c, 0x50, 0x54, 0x58, ++ 0x5c }; ++static u32 arrBuckSizeRegIndex[16] = { 3, 5, 8, 6, 1, 7, 11, 10, 14, 13, 2, 4, ++ 9, 12, 0, 15 }; ++ ++static struct Resolution resTable1[0x3B - 0x30 + 1] = { { 800, 600 }, { 1024, 768 }, { ++ 1280, 1024 }, { 1600, 1200 }, { 1920, 1200 }, { 1280, 800 }, ++ { 1440, 900 }, { 1680, 1050 }, { 1920, 1080 }, { 1366, 768 }, { 1600, ++ 900 }, { 1152, 864 }, }; ++ ++static struct Resolution resTable2[0x52 - 0x50 + 1] = { { 320, 240 }, { 400, 300 }, { ++ 512, 384 }, }; ++ ++static void prepare_bse_descriptor_2(struct Descriptor *pDAddress, ++ phys_addr_t source_addr, ++ phys_addr_t dest_addr, ++ bool bNotLastEntry, ++ u16 wStride, ++ u8 bytesPerPixel, ++ u32 dwFetchWidthPixels, ++ u32 dwFetchHeight, ++ bool bInterrupt, ++ u8 byBuckSizeRegIndex); ++ ++static struct BSEAggregateRegister set_up_bse_bucket_2(struct AstRVAS *pAstRVAS, ++ u8 *abyBitIndexes, ++ u8 byTotalBucketCount, ++ u8 byBSBytesPerPixel, ++ u32 dwFetchWidthPixels, ++ u32 dwFetchHeight, ++ u32 dwBucketSizeIndex); ++ ++static inline u32 ast_video_read(void __iomem *video_reg_base, u32 reg) ++{ ++ u32 val = readl(video_reg_base + reg); ++ ++ return val; ++} ++ ++// Get color depth ++static void ast_video_get_color_mode(u8 byNewColorMode, struct VideoGeometry *pvg) ++{ ++ switch (byNewColorMode) { ++ case MODE_EGA: ++ pvg->gmt = VGAGraphicsMode; //4pp mode12/mode6A ++ pvg->byBitsPerPixel = 4; ++ break; ++ ++ case MODE_VGA: ++ pvg->gmt = VGAGraphicsMode; //mode 13 ++ pvg->byBitsPerPixel = 8; ++ break; ++ ++ case MODE_BPP16: ++ pvg->gmt = AGAGraphicsMode; ++ pvg->byBitsPerPixel = 16; ++ break; ++ ++ case 
MODE_BPP32: ++ pvg->gmt = AGAGraphicsMode; ++ pvg->byBitsPerPixel = 32; ++ break; ++ ++ case MODE_TEXT: ++ pvg->gmt = TextMode; ++ pvg->byBitsPerPixel = 0; ++ break; ++ ++ case MODE_CGA: ++ break; ++ ++ default: ++ pvg->byBitsPerPixel = 8; ++ break; ++ } ++} ++ ++//Mode ID mapping - use ID as index to the resolution table ++static void ast_video_get_indexed_mode(struct ModeInfo *pModeInfo, struct VideoGeometry *pvg) ++{ ++ u8 byModeIndex = (pModeInfo->byModeID & 0xf0); ++ ++ HW_ENG_DBG("Mode ID %#x\n", pModeInfo->byModeID); ++ pvg->byModeID = pModeInfo->byModeID; ++ ++ if (pModeInfo->byModeID == 0x12) { ++ pvg->wScreenWidth = 640; ++ pvg->wScreenHeight = 480; ++ } else if (byModeIndex == 0x20) { ++ pvg->wScreenWidth = 640; ++ pvg->wScreenHeight = 480; ++ } else if (byModeIndex == 0x30) { ++ pvg->wScreenWidth = ++ resTable1[pModeInfo->byModeID & 0x0f].wWidth; ++ pvg->wScreenHeight = ++ resTable1[pModeInfo->byModeID & 0x0f].wHeight; ++ } else if (byModeIndex == 0x50) { ++ pvg->wScreenWidth = ++ resTable2[pModeInfo->byModeID & 0x03].wWidth; ++ pvg->wScreenHeight = ++ resTable2[pModeInfo->byModeID & 0x03].wHeight; ++ } else if (byModeIndex == 0x60) { ++ pvg->wScreenWidth = 800; ++ pvg->wScreenHeight = 600; ++ } else { ++ HW_ENG_DBG("Mode ID %#x\n", pModeInfo->byModeID); ++ pvg->wScreenWidth = 0; ++ pvg->wScreenHeight = 0; ++ } ++} ++ ++//check special modes ++static void ast_video_set_special_modes(struct ModeInfo *pModeInfo, struct AstRVAS *pAstRVAS) ++{ ++ u8 byVGACR1 = readb(pAstRVAS->grce_reg_base + GRCE_CRTC_OFFSET + 0x1); //number of chars per line ++ u8 byVGACR7 = readb(pAstRVAS->grce_reg_base + GRCE_CRTC_OFFSET + 0x7); ++ u8 byVGACR12 = readb(pAstRVAS->grce_reg_base + GRCE_CRTC_OFFSET + 0x12); ++ u8 byVGASR1 = readb(pAstRVAS->grce_reg_base + GRCE_SEQ_OFFSET + 0x1); ++ struct VideoGeometry *pvg = &pAstRVAS->current_vg; ++ u32 dwHorizontalDisplayEnd = 0; ++ u32 dwVerticalDisplayEnd = 0; ++ ++ dwHorizontalDisplayEnd = (byVGACR1 + 1) << 3; ++ dwVerticalDisplayEnd = (((byVGACR7 & 0x40) << 3) ++ | ((byVGACR7 & 0x2) << 7) | byVGACR12) + 1; ++ ++ HW_ENG_DBG("byVGACR1=0x%x,byVGACR7=0x%x,byVGACR12=0x%x\n", byVGACR1, ++ byVGACR7, byVGACR12); ++ HW_ENG_DBG("Mode ID %#x, dwHorizontalDisplayEnd 0x%x, dwVerticalDisplayEnd 0x%x\n", ++ pModeInfo->byModeID, dwHorizontalDisplayEnd, ++ dwVerticalDisplayEnd); ++ ++ // set up special mode ++ if (VGAGraphicsMode == pvg->gmt && pvg->byBitsPerPixel == 8) { // mode 13 ++ pvg->wScreenHeight = 200; ++ pvg->wScreenWidth = 320; ++ pvg->wStride = 320; ++ } else if (TextMode == pvg->gmt) { // text mode ++ pvg->wScreenHeight = dwVerticalDisplayEnd; ++ pvg->wScreenWidth = dwHorizontalDisplayEnd; ++ ++ if (!(byVGASR1 & 0x1)) ++ pvg->wScreenWidth += (byVGACR1 + 1); ++ ++ pvg->wStride = pvg->wScreenWidth; ++ } else if (pvg->byBitsPerPixel == 4) { ++ pvg->wStride = pvg->wScreenWidth; ++ } ++} ++ ++static u32 ast_video_get_pitch(struct AstRVAS *pAstRVAS) ++{ ++ u32 dwPitch = 0; ++ u8 byVGACR13 = 0; ++ u8 byVGACR14 = 0; ++ u8 byVGACR17 = 0; ++ u16 wOffsetUpper = 0; ++ u16 wOffset = 0; ++ struct VideoGeometry *pvg = &pAstRVAS->current_vg; ++ ++ //read actual register ++ byVGACR13 = readb(pAstRVAS->grce_reg_base + GRCE_CRTC_OFFSET + 0x13); ++ byVGACR14 = readb(pAstRVAS->grce_reg_base + GRCE_CRTC_OFFSET + 0x14); ++ byVGACR17 = readb(pAstRVAS->grce_reg_base + GRCE_CRTC_OFFSET + 0x17); ++ wOffsetUpper = readb(pAstRVAS->grce_reg_base + 0xb0); ++ ++ wOffset = (wOffsetUpper << 8) | byVGACR13; ++ HW_ENG_DBG("wOffsetUpper= %#x, byVGACR13= %#x, byVGACR14= %#x, byVGACR17= %#x, wOffset= 
%#x\n", ++ wOffsetUpper, byVGACR13, byVGACR14, byVGACR17, wOffset); ++ ++ if (byVGACR14 & 0x40) ++ dwPitch = wOffset << 3; //DW mode ++ else if (byVGACR17 & 0x40) ++ dwPitch = wOffset << 1; //byte mode ++ else ++ dwPitch = wOffset << 2; //word mode ++ ++ if (pvg->gmt != TextMode) { ++ u8 byBppPowerOfTwo = 0; ++ ++ if (pvg->byBitsPerPixel == 32) ++ byBppPowerOfTwo = 2; ++ else if (pvg->byBitsPerPixel == 16) ++ byBppPowerOfTwo = 1; ++ else if (pvg->byBitsPerPixel == 8) ++ byBppPowerOfTwo = 0; ++ else ++ byBppPowerOfTwo = 3; // 4bpp ++ ++ //convert it to logic width in pixel ++ if (pvg->byBitsPerPixel > 4) ++ dwPitch >>= byBppPowerOfTwo; ++ else ++ dwPitch <<= byBppPowerOfTwo; ++ } ++ ++ return dwPitch; ++} ++ ++void update_video_geometry(struct AstRVAS *ast_rvas) ++{ ++ struct ModeInfo *pModeInfo; ++ struct NewModeInfoHeader *pNMIH; ++ struct DisplayEnd *pDE; ++ u8 byNewColorMode = 0; ++ u32 VGA_Scratch_Register_350 = 0; //VIDEO_NEW_MODE_INFO_HEADER ++ u32 VGA_Scratch_Register_354 = 0; //VIDEO_HDE ++ u32 VGA_Scratch_Register_34C = 0; //VIDEO_HDE ++ struct VideoGeometry *cur_vg = &ast_rvas->current_vg; ++ ++ VGA_Scratch_Register_350 = ast_video_read(ast_rvas->grce_reg_base, ++ AST_VIDEO_SCRATCH_350); ++ VGA_Scratch_Register_34C = ast_video_read(ast_rvas->grce_reg_base, ++ AST_VIDEO_SCRATCH_34C); ++ VGA_Scratch_Register_354 = ast_video_read(ast_rvas->grce_reg_base, ++ AST_VIDEO_SCRATCH_354); ++ ++ pModeInfo = (struct ModeInfo *)(&VGA_Scratch_Register_34C); ++ pNMIH = (struct NewModeInfoHeader *)(&VGA_Scratch_Register_350); ++ pDE = (struct DisplayEnd *)(&VGA_Scratch_Register_354); ++ HW_ENG_DBG("pModeInfo: byColorMode: %#x byModeID: %#x byRefreshRateIndex: %#x byScanLines: %#x\n", ++ pModeInfo->byColorMode, pModeInfo->byModeID, ++ pModeInfo->byRefreshRateIndex, pModeInfo->byScanLines); ++ HW_ENG_DBG("pNMIH: byColorDepth: %#x byDisplayInfo: %#x byMhzPixelClock: %#x byReserved: %#x\n", ++ pNMIH->byColorDepth, pNMIH->byDisplayInfo, ++ pNMIH->byMhzPixelClock, pNMIH->byReserved); ++ HW_ENG_DBG("pDE: HDE: %#x VDE: %#x\n", pDE->HDE, pDE->VDE); ++ ++ byNewColorMode = ((pModeInfo->byColorMode) & 0xf0) >> 4; ++ HW_ENG_DBG("byNewColorMode= %#x,byModeID=0x%x\n", byNewColorMode, ++ pModeInfo->byModeID); ++ ast_video_get_color_mode(byNewColorMode, cur_vg); ++ ++ if (pNMIH->byDisplayInfo == MODE_GET_INFO_DE) { ++ cur_vg->wScreenWidth = pDE->HDE; ++ cur_vg->wScreenHeight = pDE->VDE; ++ cur_vg->byBitsPerPixel = pNMIH->byColorDepth; ++ cur_vg->byModeID = pModeInfo->byModeID; ++ } else { ++ ast_video_get_indexed_mode(pModeInfo, cur_vg); ++ } ++ ++ cur_vg->wStride = (u16)ast_video_get_pitch(ast_rvas); ++ HW_ENG_DBG("Calculated pitch in pixels= %u\n", cur_vg->wStride); ++ ++ if (cur_vg->wStride < cur_vg->wScreenWidth) ++ cur_vg->wStride = cur_vg->wScreenWidth; ++ ++ HW_ENG_DBG("Before current display width %u, height %u, pitch %u, color depth %u, mode %d\n", ++ cur_vg->wScreenWidth, cur_vg->wScreenHeight, ++ cur_vg->wStride, cur_vg->byBitsPerPixel, cur_vg->gmt); ++ ++ if (cur_vg->gmt == TextMode || ++ (cur_vg->gmt == VGAGraphicsMode && pModeInfo->byModeID == 0x13)) { ++ ast_video_set_special_modes(pModeInfo, ast_rvas); ++ } ++ ++ //mode transition ++ if (cur_vg->wScreenHeight < 200 || cur_vg->wScreenWidth < 320) ++ cur_vg->gmt = InvalidMode; ++ ++ if (cur_vg->gmt == TextMode) { ++ u8 byVGACR9 = readb(ast_rvas->grce_reg_base + GRCE_CRTC_OFFSET + 0x9); ++ u32 dwCharacterHeight = ((byVGACR9) & 0x1f) + 1; ++ ++ HW_ENG_DBG("byModeID=0x%x,dwCharacterHeight=%d\n", ++ cur_vg->byModeID, dwCharacterHeight); ++ ++ if 
(dwCharacterHeight != 8 && dwCharacterHeight != 14 && ++ dwCharacterHeight != 16) ++ cur_vg->gmt = InvalidMode; ++ ++ if (cur_vg->wScreenWidth > 720 || cur_vg->wScreenHeight > 400) ++ cur_vg->gmt = InvalidMode; ++ } ++ ++ HW_ENG_DBG("current display width %u, height %u, pitch %u, color depth %u, mode %d\n", ++ cur_vg->wScreenWidth, cur_vg->wScreenHeight, ++ cur_vg->wStride, cur_vg->byBitsPerPixel, cur_vg->gmt); ++} ++ ++//check and update current video geometry ++bool video_geometry_change(struct AstRVAS *ast_rvas, u32 dwGRCEStatus) ++{ ++ bool b_geometry_changed = false; ++ struct VideoGeometry *cur_vg = &ast_rvas->current_vg; ++ struct VideoGeometry pre_vg; ++ ++ memcpy(&pre_vg, cur_vg, sizeof(pre_vg)); ++ update_video_geometry(ast_rvas); ++ b_geometry_changed = memcmp(&pre_vg, cur_vg, sizeof(struct VideoGeometry)) ++ != 0; ++ HW_ENG_DBG("b_geometry_changed: %d\n", b_geometry_changed); ++ return b_geometry_changed; ++} ++ ++void ioctl_get_video_geometry(struct RvasIoctl *ri, struct AstRVAS *ast_rvas) ++{ ++ memcpy(&ri->vg, &ast_rvas->current_vg, sizeof(struct VideoGeometry)); ++// HW_ENG_DBG("b_geometry_changed: %d\n", b_geometry_changed); ++} ++ ++void print_frame_buffer(u32 dwSizeByBytes, struct VGAMemInfo FBInfo) ++{ ++ u32 iter = 0; ++ phys_addr_t *frame_buffer_base = NULL; ++ u32 dwNumMappedPages = 0; ++ ++ dwNumMappedPages = ((dwSizeByBytes + 4095) >> 12); ++ frame_buffer_base = (phys_addr_t *)ioremap(FBInfo.qwFBPhysStart, dwNumMappedPages << 12); ++ ++ if (frame_buffer_base) { ++ HW_ENG_DBG("==============%s===========\n", __func__); ++ ++ for (iter = 0; iter < (dwSizeByBytes >> 2); iter++) { ++ HW_ENG_DBG("0x%x, ", frame_buffer_base[iter]); ++ ++ if ((iter % 16) == 0) ++ HW_ENG_DBG("\n"); ++ } ++ ++ HW_ENG_DBG("===========END=============\n"); ++ iounmap((void *)frame_buffer_base); ++ } ++} ++ ++void ioctl_get_grc_register(struct RvasIoctl *ri, struct AstRVAS *pAstRVAS) ++{ ++ void *virt_add = 0; ++ u32 size = 0; ++ ++ HW_ENG_DBG("Start\n"); ++ virt_add = get_virt_add_rsvd_mem((u32)ri->rmh, pAstRVAS); ++ size = ri->rmh1_mem_size; ++ ++ if (virt_is_valid_rsvd_mem((u32)ri->rmh, size, pAstRVAS)) { ++ memcpy((void *)virt_add, ++ (const void *)(pAstRVAS->grce_reg_base), 0x40); ++ memset((void *)(((u8 *)virt_add) + 0x40), 0x0, 0x20); ++ memcpy((void *)(((u8 *)virt_add) + 0x60), ++ (const void *)(pAstRVAS->grce_reg_base + 0x60), ++ GRCE_SIZE - 0x60); ++ ri->rs = SuccessStatus; ++ } else { ++ ri->rs = InvalidMemoryHandle; ++ } ++} ++ ++void ioctl_read_snoop_map(struct RvasIoctl *ri, struct AstRVAS *pAstRVAS) ++{ ++ struct ContextTable *pct = get_context_entry(ri->rc, pAstRVAS); ++ void *virt_add = 0; ++ u32 size = 0; ++ ++ virt_add = get_virt_add_rsvd_mem((u32)ri->rmh, pAstRVAS); ++ size = ri->rmh_mem_size; ++ ++ disable_grce_tse_interrupt(pAstRVAS); ++ HW_ENG_DBG("Start\n"); ++ ++ if (pct) { ++ if (virt_is_valid_rsvd_mem((u32)ri->rmh, size, pAstRVAS)) { ++ update_all_snoop_context(pAstRVAS); ++ memcpy((void *)virt_add, pct->aqwSnoopMap, ++ sizeof(pct->aqwSnoopMap)); ++ ++ if (ri->flag) { ++ ///get the context snoop address ++ memset(pct->aqwSnoopMap, 0x00, ++ sizeof(pct->aqwSnoopMap)); ++ memset(&pct->sa, 0x00, sizeof(pct->sa)); ++ } ++ ri->rs = SuccessStatus; ++ } else { ++ ri->rs = InvalidMemoryHandle; ++ } ++ } else { ++ ri->rs = InvalidContextHandle; ++ } ++ ++ enable_grce_tse_interrupt(pAstRVAS); ++} ++ ++void ioctl_read_snoop_aggregate(struct RvasIoctl *ri, struct AstRVAS *pAstRVAS) ++{ ++ struct ContextTable *pct = get_context_entry(ri->rc, pAstRVAS); ++ ++ 
disable_grce_tse_interrupt(pAstRVAS); ++ ++ if (pct) { ++ update_all_snoop_context(pAstRVAS); ++ memcpy(&ri->sa, &pct->sa, sizeof(pct->sa)); ++ HW_ENG_DBG("ri->sa.qwCol: %#llx qwRow: %#llx flag: %u\n", ++ ri->sa.qwCol, ri->sa.qwRow, ri->flag); ++ ++ if (ri->flag) ++ memset(&pct->sa, 0x00, sizeof(pct->sa)); ++ ++ ri->rs = SuccessStatus; ++ } else { ++ ri->rs = InvalidContextHandle; ++ HW_ENG_DBG("Invalid Context\n"); ++ } ++ ++ enable_grce_tse_interrupt(pAstRVAS); ++} ++ ++void ioctl_set_tse_tsicr(struct RvasIoctl *ri, struct AstRVAS *pAstRVAS) ++{ ++ void __iomem *addrTSICR; ++ ++ pAstRVAS->tse_tsicr = ri->tse_counter; ++ addrTSICR = pAstRVAS->fg_reg_base + TSE_TileSnoop_Interrupt_Count; ++ writel(pAstRVAS->tse_tsicr, addrTSICR);// max wait time before interrupt ++ ri->rs = SuccessStatus; ++} ++ ++void ioctl_get_tse_tsicr(struct RvasIoctl *ri, struct AstRVAS *pAstRVAS) ++{ ++ ri->tse_counter = pAstRVAS->tse_tsicr; ++ ri->rs = SuccessStatus; ++} ++ ++// Get the screen offset from the GRC registers ++u32 get_screen_offset(struct AstRVAS *pAstRVAS) ++{ ++ u32 dwScreenOffset = 0; ++ void __iomem *addrVGACRC = pAstRVAS->grce_reg_base + GRCE_CRTC + 0xC; // Ch ++ void __iomem *addrVGACRD = pAstRVAS->grce_reg_base + GRCE_CRTC + 0xD; // Dh ++ void __iomem *addrVGACRAF = pAstRVAS->grce_reg_base + GRCE_CRTCEXT + 0x2F; ++ ++ if (pAstRVAS->current_vg.gmt == AGAGraphicsMode) { ++ dwScreenOffset = ((readb(addrVGACRAF) << 16) | ((readb(addrVGACRC)) << 8) | ++ (readb(addrVGACRD))); ++ dwScreenOffset *= pAstRVAS->current_vg.byBitsPerPixel >> 3; ++ } ++ ++ HW_ENG_DBG("ScreenOffset: %#8.8x\n", dwScreenOffset); ++ ++ return dwScreenOffset; ++} ++ ++void reset_snoop_engine(struct AstRVAS *pAstRVAS) ++{ ++ void __iomem *addr_snoop = pAstRVAS->fg_reg_base + TSE_SnoopMap_Offset; ++ u32 reg_value = 0; ++ u32 iter; ++ ++ writel(0x0, pAstRVAS->fg_reg_base + TSE_SnoopCommand_Register_Offset); ++ writel(0x3, pAstRVAS->fg_reg_base + TSE_Status_Register_Offset); ++ reg_value = readl(pAstRVAS->fg_reg_base + TSE_Status_Register_Offset); ++ reg_value = readl(pAstRVAS->fg_reg_base + TSE_CS0Reg); ++ reg_value = readl(pAstRVAS->fg_reg_base + TSE_CS1Reg); ++ reg_value = readl(pAstRVAS->fg_reg_base + TSE_RS0Reg); ++ reg_value = readl(pAstRVAS->fg_reg_base + TSE_RS1Reg); ++ ++ //Clear TSRR00 to TSRR126 (TSRR01 to TSRR127), Snoop Map ++ for (iter = 0; iter < 0x80; ++iter) { ++ reg_value = readl(addr_snoop) + 1; ++ writel(reg_value, addr_snoop); ++ } ++ ++ reg_value = readl(pAstRVAS->fg_reg_base + TSE_TileCount_Register_Offset); ++} ++ ++void set_snoop_engine(bool b_geom_chg, struct AstRVAS *pAstRVAS) ++{ ++ void __iomem *tscmd_reg = pAstRVAS->fg_reg_base + TSE_SnoopCommand_Register_Offset; ++ void __iomem *tsfbsa_reg = pAstRVAS->fg_reg_base + TSE_FrameBuffer_Offset; ++ void __iomem *tsulr_reg = pAstRVAS->fg_reg_base + TSE_UpperLimit_Offset; ++ u32 new_tsfbsa = 0; ++ u32 tscmd = 0; ++ u8 byBytesPerPixel = 0x0; ++ u8 byTSCMDBytesPerPixel = 0x0; ++ int cContext; ++ u32 dwStride; ++ struct ContextTable **ppctContextTable = pAstRVAS->ppctContextTable; ++ ++ // Calculate Start Address into the Frame Buffer ++ new_tsfbsa = get_screen_offset(pAstRVAS); ++ tscmd = readl(tscmd_reg); ++ ++ tscmd &= (1 << TSCMD_INT_ENBL_BIT); ++ ++ HW_ENG_DBG("Latest TSFBSA: %#8.8x\n", new_tsfbsa); ++ HW_ENG_DBG("pAstRVAS->current_vg: bpp %u Mode:%#x gmt:%d Width:%u Height:%u Stride:%u\n", ++ pAstRVAS->current_vg.byBitsPerPixel, ++ pAstRVAS->current_vg.byModeID, pAstRVAS->current_vg.gmt, ++ pAstRVAS->current_vg.wScreenWidth, ++ 
pAstRVAS->current_vg.wScreenHeight, ++ pAstRVAS->current_vg.wStride); ++ ++ if (b_geom_chg || (readl(tsfbsa_reg) != new_tsfbsa)) { ++ byBytesPerPixel = pAstRVAS->current_vg.byBitsPerPixel >> 3; ++ ++ if (pAstRVAS->current_vg.gmt == VGAGraphicsMode || ++ pAstRVAS->current_vg.byBitsPerPixel == 4) { ++ byTSCMDBytesPerPixel = 0; ++ } else { ++ switch (byBytesPerPixel) { ++ case 1: ++ byTSCMDBytesPerPixel = 0; ++ break; ++ ++ case 2: ++ byTSCMDBytesPerPixel = 1; ++ break; ++ ++ case 3: ++ case 4: ++ byTSCMDBytesPerPixel = 2; ++ break; ++ } ++ } ++ dwStride = pAstRVAS->current_vg.wStride; ++ ++ if (byBytesPerPixel == 3) ++ dwStride = (dwStride + dwStride + dwStride) >> 2; ++ else if (pAstRVAS->current_vg.byBitsPerPixel == 4) ++ dwStride >>= 1; ++ ++ // set TSE SCR ++ // start the tile snoop engine ++ // flip the 15 bit ++ if (!(readl(tscmd_reg) & TSCMD_SCREEN_OWNER)) ++ tscmd |= TSCMD_SCREEN_OWNER; ++ ++ tscmd |= (dwStride << TSCMD_PITCH_BIT) | (1 << TSCMD_CPT_BIT) ++ | (1 << TSCMD_RPT_BIT) ++ | (byTSCMDBytesPerPixel << TSCMD_BPP_BIT) ++ | (1 << TSCMD_VGA_MODE_BIT) | (1 << TSCMD_TSE_ENBL_BIT); ++ HW_ENG_DBG("tscmd: %#8.8x\n", tscmd); ++ // set the TSFBSA & TSULR ++ writel(new_tsfbsa, tsfbsa_reg); ++ writel(BSE_UPPER_LIMIT, tsulr_reg); ++ writel(tscmd, tscmd_reg); ++ //reset snoop information ++ get_snoop_map_data(pAstRVAS); ++ memset((void *)pAstRVAS->accrued_sm, 0, ++ sizeof(pAstRVAS->accrued_sm)); ++ memset((void *)&pAstRVAS->accrued_sa, 0, ++ sizeof(pAstRVAS->accrued_sa)); ++ ++ for (cContext = 0; cContext < MAX_NUM_CONTEXT; cContext++) { ++ if (ppctContextTable[cContext]) { ++ memset(ppctContextTable[cContext]->aqwSnoopMap, ++ 0, ++ sizeof(ppctContextTable[cContext]->aqwSnoopMap)); ++ memset(&ppctContextTable[cContext]->sa, 0, ++ sizeof(ppctContextTable[cContext]->sa)); ++ } ++ } // for each context ++ } // if ++} ++ ++// ++// ReadSnoopMap to Clear ++// ++void get_snoop_map_data(struct AstRVAS *pAstRVAS) ++{ ++ u32 dwSMDword; ++ u64 aqwSnoopMap[SNOOP_MAP_QWORD_COUNT]; ++ //u32 dw_iter; ++ ++ get_snoop_aggregate(pAstRVAS); ++ memcpy((void *)aqwSnoopMap, ++ (const void *)(pAstRVAS->fg_reg_base + TSE_SnoopMap_Offset), ++ sizeof(aqwSnoopMap)); ++ ++ //HW_ENG_DBG("Snoop Map:\n"); ++ //HW_ENG_DBG("==========\n"); ++ ++ //for (dw_iter = 0; dw_iter < SNOOP_MAP_QWORD_COUNT; ++dw_iter) ++ //HW_ENG_DBG("[%2u]: 0x%16.16llx\n", dw_iter, aqwSnoopMap[dw_iter]); ++ ++ //HW_ENG_DBG("==========\n\n"); ++ ++ // copy 512 snoop map ++ for (dwSMDword = 0; dwSMDword < SNOOP_MAP_QWORD_COUNT; ++dwSMDword) ++ pAstRVAS->accrued_sm[dwSMDword] |= aqwSnoopMap[dwSMDword]; ++} ++ ++void get_snoop_aggregate(struct AstRVAS *pAstRVAS) ++{ ++ u64 qwRow = 0; ++ u64 qwCol = 0; ++ ++ // copy the snoop aggregate,row 64 bits ++ qwRow = readl(pAstRVAS->fg_reg_base + TSE_RS1Reg); ++ qwRow = qwRow << 32; ++ qwRow |= readl(pAstRVAS->fg_reg_base + TSE_RS0Reg); ++ ++ // column ++ qwCol = readl(pAstRVAS->fg_reg_base + TSE_CS1Reg); ++ qwCol = qwCol << 32; ++ qwCol |= readl(pAstRVAS->fg_reg_base + TSE_CS0Reg); ++ ++ HW_ENG_DBG("Snoop Aggregate Row: 0x%16.16llx\n", qwRow); ++ HW_ENG_DBG("Snoop Aggregate Col: 0x%16.16llx\n", qwCol); ++ HW_ENG_DBG("DRIVER:: %s\n", __func__); ++ HW_ENG_DBG("DRIVER:: row [%#llx]\n", qwRow); ++ HW_ENG_DBG("DRIVER:: col [%#llx]\n", qwCol); ++ ++ pAstRVAS->accrued_sa.qwCol |= qwCol; ++ pAstRVAS->accrued_sa.qwRow |= qwRow; ++} ++ ++u64 reinterpret_32bpp_snoop_row_as_24bpp(u64 theSnoopRow) ++{ ++ u64 qwResult = 0; ++ u64 qwSourceBit = 1; ++ u32 cSourceBit; ++ u64 qwBitResult = 0; ++ ++ for (cSourceBit = 0; cSourceBit 
< 64; ++cSourceBit) { ++ if (theSnoopRow & qwSourceBit) { ++ qwBitResult = ((cSourceBit * 128) / 96); ++ qwResult |= (((u64)3) << qwBitResult); ++ } ++ ++ qwSourceBit <<= 1; ++ } ++ ++ return qwResult; ++} ++ ++// ++//one tile: 32x32, ++// ++void convert_snoop_map(struct AstRVAS *pAstRVAS) ++{ ++ u32 dwAllRows = (pAstRVAS->current_vg.wScreenHeight + 31) >> 5; ++ u32 cRow; ++ ++ for (cRow = 0; cRow < dwAllRows; ++cRow) ++ pAstRVAS->accrued_sm[cRow] = ++ reinterpret_32bpp_snoop_row_as_24bpp(pAstRVAS->accrued_sm[cRow]); ++ ++ pAstRVAS->accrued_sa.qwCol = ++ reinterpret_32bpp_snoop_row_as_24bpp(pAstRVAS->accrued_sa.qwCol); ++} ++ ++void update_all_snoop_context(struct AstRVAS *pAstRVAS) ++{ ++ u32 cContext; ++ u32 iSMDword; ++ struct ContextTable **ppctContextTable = pAstRVAS->ppctContextTable; ++ ++ if (pAstRVAS->current_vg.byBitsPerPixel == 24) ++ convert_snoop_map(pAstRVAS); ++ ++ for (cContext = 0; cContext < MAX_NUM_CONTEXT; cContext++) ++ if (ppctContextTable[cContext]) { ++ for (iSMDword = 0; iSMDword < SNOOP_MAP_QWORD_COUNT; ++ iSMDword++) ++ ppctContextTable[cContext]->aqwSnoopMap[iSMDword] |= ++ pAstRVAS->accrued_sm[iSMDword]; ++ ++ ppctContextTable[cContext]->sa.qwRow |= ++ pAstRVAS->accrued_sa.qwRow; ++ ppctContextTable[cContext]->sa.qwCol |= ++ pAstRVAS->accrued_sa.qwCol; ++ } ++ ++ //reset snoop map and aggregate ++ memset((void *)pAstRVAS->accrued_sm, 0, sizeof(pAstRVAS->accrued_sm)); ++ memset((void *)&pAstRVAS->accrued_sa, 0x00, ++ sizeof(pAstRVAS->accrued_sa)); ++} ++ ++static u32 setup_tfe_cr(struct FetchOperation *pfo) ++{ ++ u32 dwTFECR = 0; ++ ++ if (pfo->bEnableRLE) ++ dwTFECR = (pfo->byRLETripletCode << 24) ++ | (pfo->byRLERepeatCode << 16); ++ ++ dwTFECR &= TFCTL_DESCRIPTOR_IN_DDR_MASK; ++ dwTFECR |= 1; ++ dwTFECR |= 1 << 1; // enabled IRQ ++ HW_ENG_DBG("dwTFECR: %#x\n", dwTFECR); ++ return dwTFECR; ++} ++ ++static void start_skip_mode_skip(struct Descriptor *desc_virt, ++ phys_addr_t desc_phys, ++ phys_addr_t source_phys, phys_addr_t dest_addr, u16 wStride, ++ u8 bytesPerPixel, u32 dwFetchWidthPixels, ++ u32 dwFetchHeight, bool bRLEOverFLow) ++{ ++ struct Descriptor *pVirtDesc = desc_virt; ++ ++ // Fetch Skipping data to a temp buffer ++ prepare_tfe_descriptor(pVirtDesc, source_phys, dest_addr, true, 1, ++ false, wStride, bytesPerPixel, ++ dwFetchWidthPixels, dwFetchHeight, ++ LowByteMode, bRLEOverFLow, 0); ++ ++ dest_addr += dwFetchWidthPixels * dwFetchHeight; ++ pVirtDesc++; ++ ++ if (bytesPerPixel == 3 || bytesPerPixel == 4) { ++ prepare_tfe_descriptor(pVirtDesc, source_phys, dest_addr, ++ true, 1, false, wStride, bytesPerPixel, ++ dwFetchWidthPixels, dwFetchHeight, ++ MiddleByteMode, bRLEOverFLow, 0); ++ ++ dest_addr += dwFetchWidthPixels * dwFetchHeight; ++ pVirtDesc++; ++ } ++ ++ prepare_tfe_descriptor(pVirtDesc, source_phys, dest_addr, false, 1, ++ false, wStride, bytesPerPixel, ++ dwFetchWidthPixels, dwFetchHeight, ++ TopByteMode, bRLEOverFLow, 1); ++} ++ ++// calculate pure fetch size ++static u32 calculate_fetch_size(enum SelectedByteMode sbm, u8 bytesPerPixel, ++ u32 dwFetchWidthPixels, u32 dwFetchHeight) ++{ ++ u32 dwFetchSize = 0; ++ ++ switch (sbm) { ++ case AllBytesMode: ++ dwFetchSize = dwFetchWidthPixels * dwFetchHeight ++ * bytesPerPixel; ++ break; ++ ++ case SkipMode: ++ if (bytesPerPixel == 3 || bytesPerPixel == 4) ++ dwFetchSize = dwFetchWidthPixels * dwFetchHeight * 3; ++ else ++ dwFetchSize = dwFetchWidthPixels * dwFetchHeight ++ * bytesPerPixel; ++ break; ++ ++ case PlanarToPackedMode: ++ dwFetchSize = (dwFetchWidthPixels * dwFetchHeight); ++ 
break; ++ ++ case PackedToPackedMode: ++ break; ++ ++ default: ++ HW_ENG_DBG("Mode= %d is not supported\n", sbm); ++ break; ++ } //switch ++ return dwFetchSize; ++} ++ ++static void display_fetch_info(struct FetchVideoTilesArg *pFVTDescriptor, u32 dwCD) ++{ ++ struct FetchRegion *pfr = NULL; ++ ++ pfr = &pFVTDescriptor->pfo[dwCD].fr; ++ HW_ENG_DBG("FETCH - 1 dwCD: %u\n", dwCD); ++ HW_ENG_DBG("pfr->wLeftX :%d\n", pfr->wLeftX); ++ HW_ENG_DBG("pfr->wTopY :%d\n", pfr->wTopY); ++ HW_ENG_DBG("pfr->wRightX :%d\n", pfr->wRightX); ++ HW_ENG_DBG("pfr->wBottomY :%d\n", pfr->wBottomY); ++ HW_ENG_DBG(" bEanbleRLE %d\n", pFVTDescriptor->pfo[dwCD].bEnableRLE); ++ HW_ENG_DBG("Stride : %d\n", pFVTDescriptor->vg.wStride); ++} ++ ++void ioctl_fetch_video_tiles(struct RvasIoctl *ri, struct AstRVAS *pAstRVAS) ++{ ++ struct FetchVideoTilesArg *pFVTDescriptor; ++ u32 dwCD = 0; ++ struct Descriptor *pdesc_virt; ++ phys_addr_t qw_desc_phys; ++ phys_addr_t qw_source_phys; ++ phys_addr_t qw_destination_phys; ++ u8 bytesPerPixel; ++ struct FetchRegion *pfr; ++ bool bNotLastEntry = false; ++ u32 dwTFECR = 0; ++ u32 dwTotalFetchSize = 0; ++ u32 dwRLESize = 0; ++ bool bRLEOverFLow = false; ++ u32 dwFetchWidthPixels = 0; ++ u32 dwFetchHeight = 0; ++ phys_addr_t arg_phys = 0; ++ phys_addr_t data_phys_out = 0; ++ phys_addr_t data_phys_temp = 0; ++ u16 stride = 0; ++ bool bSkippingMode = false; ++ void *desc_virt = NULL; ++ phys_addr_t desc_phy = 0; ++ struct ContextTable *ctx_entry = NULL; ++ ++ HW_ENG_DBG("DRIVER:::: TILE FETCH CHAINING\n"); ++ ctx_entry = get_context_entry(ri->rc, pAstRVAS); ++ ++ if (ctx_entry) { ++ desc_virt = ctx_entry->desc_virt; ++ desc_phy = ctx_entry->desc_phy; ++ } else { ++ HW_ENG_DBG("Returning with invalid Context handle: 0x%p\n", ri->rc); ++ ri->rs = InvalidContextHandle; ++ return; ++ } ++ ++ ri->rs = SuccessStatus; ++ //struct FetchVideoTilesArg buffer ++ arg_phys = get_phys_add_rsvd_mem((u32)ri->rmh, pAstRVAS); ++ //Fetch final dest buffer ++ data_phys_out = get_phys_add_rsvd_mem((u32)ri->rmh1, pAstRVAS); ++ //Intermediate Buffer ++ data_phys_temp = get_phys_add_rsvd_mem((u32)ri->rmh2, pAstRVAS); ++ ++ qw_destination_phys = data_phys_out; ++ pFVTDescriptor = (struct FetchVideoTilesArg *)get_virt_add_rsvd_mem((u32)ri->rmh, pAstRVAS); ++ HW_ENG_DBG("Destination virtual Add: 0x%llx\n", get_virt_add_rsvd_mem((u32)ri->rmh, pAstRVAS)); ++ HW_ENG_DBG("Destination Physical Add: %llx\n", qw_destination_phys); ++ memset(desc_virt, 0x00, PAGE_SIZE); ++ ++ if (arg_phys && data_phys_out && data_phys_temp) { ++ pdesc_virt = (struct Descriptor *)desc_virt; ++ qw_desc_phys = desc_phy; ++ HW_ENG_DBG("Descriptor Virtual Addr: %llx\n", ++ (phys_addr_t)desc_virt); ++ HW_ENG_DBG("Descriptor Physical Addr: %llx\n", qw_desc_phys); ++ stride = pFVTDescriptor->vg.wStride; ++ ++ if (pFVTDescriptor->vg.byBitsPerPixel == 4) { ++ bytesPerPixel = 1; ++ stride >>= 1; ++ } else { ++ bytesPerPixel = pFVTDescriptor->vg.byBitsPerPixel >> 3; ++ } ++ ++ HW_ENG_DBG("u8 per pixel:%u\n", bytesPerPixel); ++ // fetch all data to Destination 1 without RLE ++ HW_ENG_DBG("FETCH - 0\n"); ++ HW_ENG_DBG("COUNT OF Operation: %u\n", pFVTDescriptor->cfo); ++ ++ for (dwCD = 0; dwCD < pFVTDescriptor->cfo; dwCD++) { ++ display_fetch_info(pFVTDescriptor, dwCD); ++ // Set up Control Register. 
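++ /*
++  * A sketch of what the control word returned by setup_tfe_cr() above
++  * carries (as read from that helper, not from hardware documentation):
++  * when RLE is enabled, the RLE triplet code sits in bits [31:24] and the
++  * repeat code in bits [23:16]; bit 7 is cleared via
++  * TFCTL_DESCRIPTOR_IN_DDR_MASK so the descriptor is fetched from DRAM,
++  * bit 0 enables the engine and bit 1 enables the completion IRQ.
++  */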
++ dwTFECR = setup_tfe_cr(&pFVTDescriptor->pfo[dwCD]); ++ pfr = &pFVTDescriptor->pfo[dwCD].fr; ++ // find Source Address ++ if (pFVTDescriptor->vg.byBitsPerPixel == 4) { ++ qw_source_phys = get_phy_fb_start_address(pAstRVAS) ++ + ((pfr->wLeftX * bytesPerPixel) >> 1) ++ + pfr->wTopY * stride ++ * bytesPerPixel; ++ ++ dwFetchWidthPixels = (pfr->wRightX - pfr->wLeftX + 1) >> 1; ++ } else { ++ qw_source_phys = get_phy_fb_start_address(pAstRVAS) ++ + pfr->wLeftX * bytesPerPixel ++ + pfr->wTopY * stride ++ * bytesPerPixel; ++ ++ dwFetchWidthPixels = (pfr->wRightX - pfr->wLeftX + 1); ++ } ++ HW_ENG_DBG("dwCD: %u qw_source_phys: %#x\n", dwCD, ++ qw_source_phys); ++ dwFetchHeight = pfr->wBottomY - pfr->wTopY + 1; ++ ++ HW_ENG_DBG("DESCRIPTOR virtual ADDRESS: 0x%p\n", ++ pdesc_virt); ++ if (pFVTDescriptor->vg.byBitsPerPixel == 4) ++ pFVTDescriptor->pfo[dwCD].sbm = ++ PlanarToPackedMode; ++ ++ pFVTDescriptor->pfo[dwCD].dwFetchSize = ++ calculate_fetch_size(pFVTDescriptor->pfo[dwCD].sbm, ++ bytesPerPixel, dwFetchWidthPixels, ++ dwFetchHeight); ++ bSkippingMode = ++ (pFVTDescriptor->pfo[dwCD].sbm == SkipMode) ? ++ true : false; ++ ++ if (bSkippingMode && bytesPerPixel > 1) { ++ u32 skipSrcAddr = qw_source_phys; ++ u32 skipDestAddr = qw_destination_phys; ++ u8 byPostBytesPerPixel = ++ (bytesPerPixel == 2) ? 2 : 3; ++ HW_ENG_DBG("In SkippingMode...\n"); ++ ++ if (pFVTDescriptor->pfo[dwCD].bEnableRLE) { ++ //skip data to intermediate buffer ++ skipDestAddr = data_phys_temp; ++ } ++ ++ start_skip_mode_skip(pdesc_virt, ++ qw_desc_phys, skipSrcAddr, ++ skipDestAddr, ++ pFVTDescriptor->vg.wStride, ++ bytesPerPixel, dwFetchWidthPixels, ++ dwFetchHeight, bRLEOverFLow); ++ ++ if (pFVTDescriptor->pfo[dwCD].bEnableRLE) { ++ u32 rleSrcAddr = skipDestAddr; ++ u32 rleDesAddr = qw_destination_phys; ++ ++ ///// take second look at skip mode for using map single ++ if (sleep_on_tfe_busy(pAstRVAS, ++ qw_desc_phys, // Descriptor physical Address ++ dwTFECR, // control register value ++ pFVTDescriptor->pfo[dwCD].dwFetchSize, // bandwidth limitor value ++ &dwRLESize, // out:: rle size ++ &pFVTDescriptor->pfo[dwCD].dwCheckSum ++ ) == false) { // out:: cs size ++ ri->rs = GenericError; ++ return; ++ } ++ ++ // perform RLE from Temp buffer to qw_destination_phys ++ //HW_ENG_DBG("skip rle\n"); ++ prepare_tfe_descriptor(pdesc_virt, ++ rleSrcAddr, rleDesAddr, ++ bNotLastEntry, 1, ++ pFVTDescriptor->pfo[dwCD].bEnableRLE, ++ dwFetchWidthPixels, ++ byPostBytesPerPixel, ++ dwFetchWidthPixels, ++ dwFetchHeight, AllBytesMode, ++ bRLEOverFLow, 1); ++ } ++ } else { ++ HW_ENG_DBG("Preparing TFE Descriptor with no skipping...\n"); ++ prepare_tfe_descriptor(pdesc_virt, ++ qw_source_phys, qw_destination_phys, ++ bNotLastEntry, 1, ++ pFVTDescriptor->pfo[dwCD].bEnableRLE, ++ stride, bytesPerPixel, ++ dwFetchWidthPixels, dwFetchHeight, ++ pFVTDescriptor->pfo[dwCD].sbm, ++ bRLEOverFLow, 1); ++ HW_ENG_DBG("Successfully prepared TFE Descriptor with no skipping\n"); ++ } ++ HW_ENG_DBG("Sleeping while TFE is busy...\n"); ++ ++ if (sleep_on_tfe_busy(pAstRVAS, qw_desc_phys, // Descriptor physical Address ++ dwTFECR, // control register value ++ pFVTDescriptor->pfo[dwCD].dwFetchSize, // bandwidth limitor value ++ &dwRLESize, // out:: rle size ++ &pFVTDescriptor->pfo[dwCD].dwCheckSum ++ ) == false) { // out:: cs size ++ ri->rs = GenericError; ++ return; ++ } ++ ++ HW_ENG_DBG("After sleep where TFE was busy\n"); ++ ++ //HW_ENG_DBG("skip rle end\n"); ++ if (!pFVTDescriptor->pfo[dwCD].bEnableRLE) { // RLE not enabled ++ HW_ENG_DBG("RLE is off\n"); ++ 
pFVTDescriptor->pfo[dwCD].bRLEFailed = false; ++ dwRLESize = ++ pFVTDescriptor->pfo[dwCD].dwFetchSize; ++ dwTotalFetchSize += ++ pFVTDescriptor->pfo[dwCD].dwFetchSize; ++ } else { // RLE enabled ++ HW_ENG_DBG("RLE Enabled\n"); ++ if (dwRLESize ++ >= pFVTDescriptor->pfo[dwCD].dwFetchSize) { // FAILED ++ HW_ENG_DBG("DRVIER:: RLE failed RLE: %u > %u\n", ++ dwRLESize, ++ pFVTDescriptor->pfo[dwCD].dwFetchSize); ++ pFVTDescriptor->pfo[dwCD].bRLEFailed = ++ true; ++ ++ if (bSkippingMode) { ++ phys_addr_t skip_source_addr = ++ qw_source_phys; ++ phys_addr_t skip_dest_addr = ++ qw_destination_phys; ++ ++ start_skip_mode_skip(pdesc_virt, ++ qw_desc_phys, ++ skip_source_addr, ++ skip_dest_addr, ++ pFVTDescriptor->vg.wStride, ++ bytesPerPixel, ++ dwFetchWidthPixels, ++ dwFetchHeight, ++ bRLEOverFLow); ++ } else { ++ HW_ENG_DBG(" FETCH - 4\n"); ++ prepare_tfe_descriptor(pdesc_virt, ++ qw_source_phys, ++ qw_destination_phys, ++ bNotLastEntry, 1, false, ++ pFVTDescriptor->vg.wStride, ++ bytesPerPixel, ++ dwFetchWidthPixels, ++ dwFetchHeight, ++ pFVTDescriptor->pfo[dwCD].sbm, ++ bRLEOverFLow, 1); ++ } ++ ++ if (sleep_on_tfe_busy(pAstRVAS, ++ qw_desc_phys, // Descriptor physical Address ++ dwTFECR, // control register value ++ pFVTDescriptor->pfo[dwCD].dwFetchSize, // bandwidth limitor value ++ &dwRLESize, // out:: rle size ++ &pFVTDescriptor->pfo[dwCD].dwCheckSum ++ ) == false) { // out:: cs size ++ ri->rs = GenericError; ++ return; ++ } ++ ++ dwTotalFetchSize += ++ pFVTDescriptor->pfo[dwCD].dwFetchSize; ++ dwRLESize = ++ pFVTDescriptor->pfo[dwCD].dwFetchSize; ++ } else { //RLE successful ++ pFVTDescriptor->pfo[dwCD].bRLEFailed = ++ false; ++ dwTotalFetchSize += dwRLESize; ++ dwTotalFetchSize = (dwTotalFetchSize ++ + 0x3) & 0xfffffffc; ++ } ++ } //RLE Enabled ++ ++ pFVTDescriptor->pfo[dwCD].dwFetchRLESize = dwRLESize; ++ HW_ENG_DBG("DRIVER:: RLE: %u, nonRLE: %u\n", dwRLESize, ++ pFVTDescriptor->pfo[dwCD].dwFetchSize); ++ HW_ENG_DBG("FETCH:: loop FETCH size: %u\n", dwTotalFetchSize); ++ qw_destination_phys = data_phys_out + dwTotalFetchSize; ++ } //for TFE ++ ++ pFVTDescriptor->dwTotalOutputSize = dwTotalFetchSize; ++ HW_ENG_DBG("Fetch Size: %#x\n", dwTotalFetchSize); ++ } else { ++ dev_err(pAstRVAS->pdev, "Memory allocation failure\n"); ++ ri->rs = InvalidMemoryHandle; ++ } ++} // End - ioctl_fetch_video_tiles ++ ++void prepare_ldma_descriptor(struct Descriptor *pDAddress, phys_addr_t source_addr, ++ phys_addr_t dest_addr, u32 dwLDMASize, u8 byNotLastEntry) ++{ ++ u8 byInterrupt = 0; ++ ++ HW_ENG_DBG("pDAddress: 0x%p\n", pDAddress); ++ ++ // initialize to 0 ++ pDAddress->dw0General = 0x00; ++ pDAddress->dw1FetchWidthLine = 0x00; ++ pDAddress->dw2SourceAddr = 0x00; ++ pDAddress->dw3DestinationAddr = 0x00; ++ ++ // initialize to 0 ++ if (!byNotLastEntry) ++ byInterrupt = 0x1; ++ ++ pDAddress->dw0General = ((dwLDMASize - 1) << 8) | (byNotLastEntry << 1) ++ | byInterrupt; ++ pDAddress->dw2SourceAddr = (u32)source_addr; ++ pDAddress->dw3DestinationAddr = (u32)dest_addr; ++ ++ HW_ENG_DBG("u32 0: 0x%x\n", pDAddress->dw0General); ++ HW_ENG_DBG("u32 1: 0x%x\n", pDAddress->dw1FetchWidthLine); ++ HW_ENG_DBG("u32 2: 0x%x\n", pDAddress->dw2SourceAddr); ++ HW_ENG_DBG("u32 3: 0x%x\n", pDAddress->dw3DestinationAddr); ++} ++ ++// ++// ioctl_run_length_encode_data - encode buffer data ++// ++void ioctl_run_length_encode_data(struct RvasIoctl *ri, struct AstRVAS *pAstRVAS) ++{ ++ struct Descriptor *pDescriptorAdd = NULL; ++ struct Descriptor *pDescriptorAddPhys = NULL; ++ u8 bytesPerPixel; ++ bool bNotLastEntry = true; ++ 
u32 dwTFECR = 0; ++ bool bRLEOverFLow = false; ++ u32 dwFetchWidthPixels = 0; ++ u32 dwFetchHeight = 0; ++ u32 dwPhysAddIn; ++ u32 dwPhysAddOut; ++ u32 data_size = 0; ++ void *desc_virt = NULL; ++ u32 desc_phy = 0; ++ struct ContextTable *ctx_entry = NULL; ++ ++ ctx_entry = get_context_entry(ri->rc, pAstRVAS); ++ if (ctx_entry) { ++ desc_virt = ctx_entry->desc_virt; ++ desc_phy = ctx_entry->desc_phy; ++ } else { ++ ri->rs = InvalidContextHandle; ++ return; ++ } ++ ++ ri->rs = SuccessStatus; ++ ++ dwPhysAddIn = get_phys_add_rsvd_mem((u32)ri->rmh, pAstRVAS); ++ dwPhysAddOut = get_phys_add_rsvd_mem((u32)ri->rmh1, pAstRVAS); ++ ++ data_size = ri->rmh_mem_size; ++ pDescriptorAdd = (struct Descriptor *)ctx_entry->desc_virt; ++ pDescriptorAddPhys = (struct Descriptor *)ctx_entry->desc_phy; ++ ++ HW_ENG_DBG("pDescriptorAdd=%#x, phy=%#x\n", (u32)pDescriptorAdd, ++ (u32)pDescriptorAddPhys); ++ ++ if (dwPhysAddIn && dwPhysAddOut) { ++ // Enable TFE ++ dwTFECR = (ri->encode & 0xffff0000) << 16; ++ dwTFECR |= 1; ++ dwTFECR &= TFCTL_DESCRIPTOR_IN_DDR_MASK; ++ ++ // triplet code and repeat code ++ bNotLastEntry = false; ++ bRLEOverFLow = true; ++ dwFetchWidthPixels = TILE_SIZE; ++ dwFetchHeight = data_size / TILE_SIZE; ++ bytesPerPixel = 1; ++ ++ prepare_tfe_descriptor(pDescriptorAdd, dwPhysAddIn, ++ dwPhysAddOut, bNotLastEntry, 1, ++ 1, dwFetchWidthPixels, ++ bytesPerPixel, dwFetchWidthPixels, ++ dwFetchHeight, AllBytesMode, bRLEOverFLow, 1); ++ ++ if (sleep_on_tfe_busy(pAstRVAS, (phys_addr_t)pDescriptorAddPhys, ++ dwTFECR, data_size, &ri->rle_len, ++ &ri->rle_checksum) == false) { ++ ri->rs = GenericError; ++ dev_err(pAstRVAS->pdev, "%s sleep_on_tfe_busy ERROR\n", __func__); ++ return; ++ } ++ } else { ++ ri->rs = InvalidMemoryHandle; ++ } ++} ++ ++static u32 get_video_slice_fetch_width(u8 cBuckets) ++{ ++ u32 dwFetchWidthPixels = 0; ++ ++ switch (cBuckets) { ++ case 3: ++ dwFetchWidthPixels = ((TILE_SIZE << 5) * 3) >> 3; ++ break; ++ ++ case 8: ++ dwFetchWidthPixels = TILE_SIZE << 5; ++ break; ++ ++ case 16: ++ dwFetchWidthPixels = (TILE_SIZE << 5) * 2; ++ break; ++ ++ case 24: ++ dwFetchWidthPixels = (TILE_SIZE << 5) * 3; ++ break; ++ ++ default: ++ dwFetchWidthPixels = TILE_SIZE << 2; ++ break; ++ } ++ ++ return dwFetchWidthPixels; ++} ++ ++void ioctl_fetch_video_slices(struct RvasIoctl *ri, struct AstRVAS *pAstRVAS) ++{ ++ struct FetchVideoSlicesArg *pFVSA; ++ u32 dwCD; ++ struct Descriptor *pdesc_virt; ++ phys_addr_t qw_desc_phys; ++ phys_addr_t source_addr; ++ phys_addr_t slice_dest_addr; ++ u8 bytesPerPixel; ++ bool bNotLastEntry = true; ++ bool bInterrupt = false; ++ u32 dwTFECR = 0; ++ u32 dwFetchSize = 0; ++ bool bRLEOverFLow = false; ++ u32 dwFetchWidthPixels = 0; ++ u32 dwFetchHeight = 0; ++ phys_addr_t arg_phys = 0; ++ phys_addr_t data_phys_out = 0; ++ phys_addr_t data_phys_rle = 0; ++ struct BSEAggregateRegister aBSEAR; ++ struct Descriptor *pNextDescriptor = 0; ++ phys_addr_t dest_next_addr = 0; ++ u32 dwBucketSizeIter = 0; ++ bool bBucketSizeEnable = 0; ++ void __iomem *addrBSCR = pAstRVAS->fg_reg_base + BSE_Command_Register; ++ void *desc_virt = NULL; ++ phys_addr_t desc_phy = 0; ++ struct ContextTable *ctx_entry = get_context_entry(ri->rc, pAstRVAS); ++ ++ HW_ENG_DBG("Start\n"); ++ ++ if (ctx_entry) { ++ desc_virt = ctx_entry->desc_virt; ++ desc_phy = ctx_entry->desc_phy; ++ } else { ++ pr_err("BSE: Cannot get valid context\n"); ++ ri->rs = InvalidContextHandle; ++ return; ++ } ++ ++ arg_phys = get_phys_add_rsvd_mem((u32)ri->rmh, pAstRVAS); ++ data_phys_out = 
get_phys_add_rsvd_mem((u32)ri->rmh1, pAstRVAS); ++ data_phys_rle = get_phys_add_rsvd_mem((u32)ri->rmh2, pAstRVAS); ++ ++ if (!arg_phys || !data_phys_out || !data_phys_rle) { ++ pr_err("BSE: Invalid memory handle\n"); ++ ri->rs = InvalidMemoryHandle; ++ return; ++ } ++ ri->rs = SuccessStatus; ++ slice_dest_addr = data_phys_out; ++ pFVSA = (struct FetchVideoSlicesArg *)get_virt_add_rsvd_mem((u32)ri->rmh, pAstRVAS); ++ ++ HW_ENG_DBG("bEnableRLE: %d cBuckets: %u cfr: %u\n", pFVSA->bEnableRLE, ++ pFVSA->cBuckets, pFVSA->cfr); ++ ++ if (pFVSA->cfr > 1) { ++ writel(readl(addrBSCR) | BSE_ENABLE_MULT_BUCKET_SZS, addrBSCR); ++ bBucketSizeEnable = 1; ++ } else { ++ writel(readl(addrBSCR) & (~BSE_ENABLE_MULT_BUCKET_SZS), addrBSCR); ++ bBucketSizeEnable = 0; ++ } ++ ++ HW_ENG_DBG("*pdwBSCR: %#x bBucketSizeEnable: %d\n", readl(addrBSCR), ++ bBucketSizeEnable); ++ ++ pdesc_virt = ctx_entry->desc_virt; ++ qw_desc_phys = ctx_entry->desc_phy; ++ bytesPerPixel = pFVSA->vg.byBitsPerPixel >> 3; ++ ++ HW_ENG_DBG("BSE:: u8 per pixel: %d\n", bytesPerPixel); ++ HW_ENG_DBG("BSE:: cfr: %u bucket size: %d\n", pFVSA->cfr, pFVSA->cBuckets); ++ ++ pNextDescriptor = pdesc_virt; ++ dest_next_addr = slice_dest_addr; ++ // Prepare BSE Descriptors for all Regions ++ HW_ENG_DBG("pNextDescriptor 0x%p dest_next_addr: %#x\n", pNextDescriptor, ++ dest_next_addr); ++ ++ for (dwCD = 0; dwCD < pFVSA->cfr; dwCD++) { ++ HW_ENG_DBG("dwCD: %u\n", dwCD); ++ HW_ENG_DBG("pfr->wLeftX :%d\n", pFVSA->pfr[dwCD].wLeftX); ++ HW_ENG_DBG("pfr->wTopY :%d\n", pFVSA->pfr[dwCD].wTopY); ++ HW_ENG_DBG("pfr->wRightX :%d\n", pFVSA->pfr[dwCD].wRightX); ++ HW_ENG_DBG("pfr->wBottomY :%d\n", pFVSA->pfr[dwCD].wBottomY); ++ ++ source_addr = get_phy_fb_start_address(pAstRVAS) ++ + pFVSA->pfr[dwCD].wLeftX * bytesPerPixel ++ + pFVSA->pfr[dwCD].wTopY * pFVSA->vg.wStride ++ * bytesPerPixel; ++ dwFetchWidthPixels = (pFVSA->pfr[dwCD].wRightX ++ - pFVSA->pfr[dwCD].wLeftX + 1); ++ dwFetchHeight = pFVSA->pfr[dwCD].wBottomY ++ - pFVSA->pfr[dwCD].wTopY + 1; ++ ++ HW_ENG_DBG("BSE Width in Pixel: %d\n", dwFetchWidthPixels); ++ HW_ENG_DBG("BSE Height: %d bBucketSizeEnable: %d\n", dwFetchHeight, ++ bBucketSizeEnable); ++ ++ if (!bBucketSizeEnable) { ++ bNotLastEntry = false; ++ bInterrupt = true; ++ prepare_bse_descriptor(pdesc_virt, ++ source_addr, slice_dest_addr, ++ bNotLastEntry, pFVSA->vg.wStride, bytesPerPixel, ++ dwFetchWidthPixels, dwFetchHeight, bInterrupt); ++ dwFetchSize += (pFVSA->cBuckets ++ * (dwFetchWidthPixels * dwFetchHeight) >> 3); ++ aBSEAR = setUp_bse_bucket(pFVSA->abyBitIndexes, ++ pFVSA->cBuckets, bytesPerPixel, ++ dwFetchWidthPixels, dwFetchHeight); ++ ++ } else { ++ if (dwCD == pFVSA->cfr - 1) { ++ bNotLastEntry = false; ++ bInterrupt = true; ++ } else { ++ bNotLastEntry = true; ++ bInterrupt = false; ++ } ++ ++ prepare_bse_descriptor_2(pNextDescriptor, ++ source_addr, ++ dest_next_addr, bNotLastEntry, ++ pFVSA->vg.wStride, bytesPerPixel, ++ dwFetchWidthPixels, dwFetchHeight, ++ bInterrupt, ++ arrBuckSizeRegIndex[dwBucketSizeIter]); ++ ++ aBSEAR = set_up_bse_bucket_2(pAstRVAS, ++ pFVSA->abyBitIndexes, pFVSA->cBuckets, ++ bytesPerPixel, dwFetchWidthPixels, ++ dwFetchHeight, ++ arrBuckSizeRegIndex[dwBucketSizeIter]); ++ ++ dwBucketSizeIter++; ++ pNextDescriptor++; ++ dwFetchSize += pFVSA->cBuckets ++ * ((dwFetchWidthPixels * dwFetchHeight) >> 3); //each bucket size ++ dest_next_addr = slice_dest_addr ++ + dwFetchSize; ++ } ++ } ++ ++ //bse now ++ if (pFVSA->cBuckets <= FULL_BUCKETS_COUNT) { ++ if (bBucketSizeEnable) ++ aBSEAR.dwBSDBS = 0x80000000; ++ ++ 
HW_ENG_DBG("Sleeping on BSE to complete\n"); ++ ++ if (sleep_on_bse_busy(pAstRVAS, qw_desc_phys, aBSEAR, ++ dwFetchSize) == false) { ++ dev_err(pAstRVAS->pdev, ".....BSE Timeout\n"); ++ ri->rs = GenericError; ++ return; ++ } ++ } ++ HW_ENG_DBG("Fetched the bit slices\n"); ++ //RLE ++ pFVSA->dwSlicedSize = dwFetchSize; ++ pFVSA->dwSlicedRLESize = pFVSA->dwSlicedSize; ++ ++ // do RLE if RLE is on. Fetch from Destination 1 to Destination 2 with RLE on ++ bNotLastEntry = false; ++ ++ if (pFVSA->bEnableRLE) { ++ HW_ENG_DBG("BSE - 3 (RLE Enabled)\n"); ++ // Enable TFE ++ dwTFECR = ((pFVSA->byRLETripletCode << 24) ++ | (pFVSA->byRLERepeatCode << 16)); ++ dwTFECR |= ((0x1 << 1) | 1); ++ dwTFECR &= TFCTL_DESCRIPTOR_IN_DDR_MASK; ++ ++ bRLEOverFLow = true; ++ bytesPerPixel = 1; ++ ++ dwFetchWidthPixels = get_video_slice_fetch_width(pFVSA->cBuckets); ++ dwFetchHeight = dwFetchSize / dwFetchWidthPixels; ++ ++ prepare_tfe_descriptor(pdesc_virt, data_phys_out, ++ data_phys_rle, bNotLastEntry, 1, pFVSA->bEnableRLE, ++ dwFetchWidthPixels, bytesPerPixel, dwFetchWidthPixels, ++ dwFetchHeight, 0, bRLEOverFLow, 1); ++ ++ HW_ENG_DBG("TFE-RLE Control Register value: 0x%x\n", dwTFECR); ++ ++ if (sleep_on_tfe_busy(pAstRVAS, qw_desc_phys, // Descriptor physical Address ++ dwTFECR, // control register value ++ dwFetchSize, // bandwidth limiter value ++ &pFVSA->dwSlicedRLESize, // out:: rle size ++ &pFVSA->dwCheckSum ++ ) == false) { ++ ri->rs = GenericError; ++ return; ++ } ++ ++ HW_ENG_DBG("Finishing RLE Fetching\n"); ++ ++ if (pFVSA->dwSlicedRLESize >= pFVSA->dwSlicedSize) ++ pFVSA->bRLEFailed = true; ++ else ++ pFVSA->bRLEFailed = false; ++ } // RLE enabled ++ ++ memcpy((void *)&dwFetchSize, (void *)&pFVSA->dwSlicedRLESize, 4); ++} ++ ++void ioctl_fetch_text_data(struct RvasIoctl *ri, struct AstRVAS *pAstRVAS) ++{ ++ bool bRLEOn = ri->tfm.bEnableRLE; ++ ++ ri->rs = SuccessStatus; ++ ++ // first time fetch ++ on_fetch_text_data(ri, bRLEOn, pAstRVAS); ++} ++ ++void on_fetch_text_data(struct RvasIoctl *ri, bool bRLEOn, struct AstRVAS *pAstRVAS) ++{ ++ struct Descriptor *pDescriptorAdd; ++ struct Descriptor *pDescriptorAddPhys; ++ u32 dwScreenOffset = 0x00; ++ phys_addr_t source_addr = get_phy_fb_start_address(pAstRVAS); ++ phys_addr_t dest_addr; ++ bool bRLEOverFlow = false; ++ bool bInterrupt = true; ++ u32 wFetchLines = 0; ++ u8 byCharacterPerLine = 0; ++ u16 wFetchWidthInBytes = 0; ++ phys_addr_t data_phys = 0; ++ phys_addr_t data_phys_rle = 0; ++ phys_addr_t data_phys_temp = 0; ++ u32 dwCtrlRegValue = 0; ++ u32 dwMinBufSize = 0; ++ void *desc_virt = NULL; ++ phys_addr_t desc_phy = 0; ++ struct ContextTable *ctx_entry = NULL; ++ ++ HW_ENG_DBG("Start\n"); ++ ctx_entry = get_context_entry(ri->rc, pAstRVAS); ++ if (ctx_entry) { ++ desc_virt = ctx_entry->desc_virt; ++ desc_phy = ctx_entry->desc_phy; ++ } else { ++ ri->rs = InvalidContextHandle; ++ return; ++ } ++ ++ wFetchLines = get_text_mode_fetch_lines(pAstRVAS, ri->vg.wScreenHeight); ++ byCharacterPerLine = get_text_mode_character_per_line(pAstRVAS, ++ ri->vg.wScreenWidth); ++ ++ data_phys = get_phys_add_rsvd_mem((u32)ri->rmh, pAstRVAS); ++ data_phys_rle = get_phys_add_rsvd_mem((u32)ri->rmh1, pAstRVAS); ++ ++ if (!data_phys || !data_phys_rle) { ++ ri->rs = InvalidMemoryHandle; ++ dev_err(pAstRVAS->pdev, "Fetch Text: Invalid Memoryhandle\n"); ++ return; ++ } ++ ++ dwMinBufSize = (byCharacterPerLine * wFetchLines) << 1; ++ ++ if (ri->rmh_mem_size < dwMinBufSize) { ++ //either buffer is too small or invalid data in registers ++ ri->rs = GenericError; ++ 
dev_err(pAstRVAS->pdev, "Fetch Text: required buffer len:0x%x\n", dwMinBufSize); ++ return; ++ } ++ memset(desc_virt, 0x00, MAX_DESC_SIZE); ++ pDescriptorAdd = desc_virt; ++ pDescriptorAddPhys = (struct Descriptor *)desc_phy; ++ dest_addr = data_phys; ++ ++ // Enable TFE ++ dwCtrlRegValue |= 1; ++ dwCtrlRegValue &= TFCTL_DESCRIPTOR_IN_DDR_MASK; ++ // set up the text alignment ++ dwScreenOffset = get_screen_offset(pAstRVAS); ++ source_addr += dwScreenOffset; ++ HW_ENG_DBG("screen offset:%#x, Source start Addr: %%llx\n", dwScreenOffset, ++ source_addr); ++ if (ri->tfm.dpm == AttrMode) { // ATTR and ASCII ++ data_phys_temp = data_phys_rle; ++ wFetchWidthInBytes = byCharacterPerLine << 3; ++ // must fetch both ascii & attr ++ HW_ENG_DBG("Attribute and ASCII\n"); ++ prepare_tfe_text_descriptor(desc_virt, source_addr, ++ data_phys_temp, ++ false, wFetchWidthInBytes, wFetchLines, ++ ri->tfm.dpm, bRLEOverFlow, bInterrupt); ++ ri->tfm.dwFetchSize = (byCharacterPerLine * wFetchLines) << 1; ++ } else if (ri->tfm.dpm == AsciiOnlyMode) { ++ wFetchWidthInBytes = byCharacterPerLine << 3; ++ HW_ENG_DBG("ASCII Only\n"); ++ prepare_tfe_text_descriptor(desc_virt, source_addr, ++ dest_addr, ++ false, wFetchWidthInBytes, wFetchLines, ++ ri->tfm.dpm, bRLEOverFlow, bInterrupt); ++ ri->tfm.dwFetchSize = byCharacterPerLine * wFetchLines; ++ } else if (ri->tfm.dpm == FontFetchMode) { ++ wFetchWidthInBytes = byCharacterPerLine << 2; ++ HW_ENG_DBG("Font Only\n"); ++ prepare_tfe_text_descriptor(desc_virt, source_addr, ++ dest_addr, ++ false, wFetchWidthInBytes, ++ wFetchLines + 256, ++ ri->tfm.dpm, bRLEOverFlow, bInterrupt); ++ ++ ri->tfm.dwFetchSize = MAX_TEXT_DATA_SIZE; ++ } ++ dwCtrlRegValue |= 1 << 1; // enabled IRQ ++ if (ri->tfm.dpm == AttrMode) { ++ if (sleep_on_tfe_text_busy(pAstRVAS, desc_phy, dwCtrlRegValue, // control register value ++ ri->tfm.dwFetchSize, // bandwidth limitor value ++ &ri->tfm.dwFetchRLESize, // out:: rle size ++ &ri->tfm.dwCheckSum) == false) { ++ dev_err(pAstRVAS->pdev, "Could not sleep_on_tfe_busy for attributes\n"); ++ ri->rs = GenericError; ++ return; ++ } ++ } else { ++ if (sleep_on_tfe_text_busy(pAstRVAS, desc_phy, dwCtrlRegValue, ++ ri->tfm.dwFetchSize, &ri->tfm.dwFetchRLESize, ++ &ri->tfm.dwCheckSum) == false) { ++ ri->rs = GenericError; ++ dev_err(pAstRVAS->pdev, "Could not sleep_on_tfe_busy for others\n"); ++ return; ++ } ++ } ++ ++ if (ri->tfm.dpm == AttrMode) { ++ //separate ATTR from ATTR+ASCII ++ source_addr = data_phys_temp; ++ dest_addr = data_phys; ++ prepare_tfe_descriptor(desc_virt, data_phys_temp, data_phys, ++ false, //not last entry? ++ 1, //checksum ++ false, //RLE? ++ byCharacterPerLine, ++ 2, //byBpp, ++ byCharacterPerLine, wFetchLines, TopByteMode, ++ bRLEOverFlow, bInterrupt); ++ ++ ri->tfm.dwFetchSize = byCharacterPerLine * wFetchLines; ++ ++ dwCtrlRegValue |= 1 << 1; // enabled IRQ ++ if (sleep_on_tfe_text_busy(pAstRVAS, (phys_addr_t)pDescriptorAddPhys, ++ dwCtrlRegValue, ri->tfm.dwFetchSize, ++ &ri->tfm.dwFetchRLESize, &ri->tfm.dwCheckSum) == false) { ++ dev_err(pAstRVAS->pdev, "Could not sleep_on_tfe_busy for attributes # 2\n"); ++ ri->rs = GenericError; ++ return; ++ } ++ } ++ // RLE enabled ++ if (bRLEOn) { ++ bRLEOverFlow = true; ++ dwCtrlRegValue = 1; ++ dwCtrlRegValue |= (ri->tfm.byRLETripletCode << 24) ++ | (ri->tfm.byRLERepeatCode << 16); ++ source_addr = dest_addr; ++ dest_addr = data_phys_rle; ++ ++ // RLE only ++ prepare_tfe_descriptor(pDescriptorAdd, source_addr, ++ dest_addr, ++ false, //not last entry? ++ 1, //checksum ++ bRLEOn, //RLE? 
++ ri->tfm.dwFetchSize / wFetchLines, 1, ++ ri->tfm.dwFetchSize / wFetchLines, wFetchLines, ++ AllBytesMode, bRLEOverFlow, bInterrupt); ++ ++ dwCtrlRegValue |= 1 << 1; // enabled IRQ ++ ++ if (sleep_on_tfe_busy(pAstRVAS, (phys_addr_t)pDescriptorAddPhys, // Descriptor physical Address ++ dwCtrlRegValue, // control register value ++ ri->tfm.dwFetchSize, // bandwidth limitor value ++ &ri->tfm.dwFetchRLESize, // out:: rle size ++ &ri->tfm.dwCheckSum) == false) { // out:: cs size ++ dev_err(pAstRVAS->pdev, "Could not sleep_on_tfe_busy for RLE for Text Mode\n"); ++ ri->rs = GenericError; ++ return; ++ } //sleeponTFEBusy ++ } ++ if (bRLEOn) { ++ ri->tfm.bRLEFailed = ++ (ri->tfm.dwFetchRLESize < ri->tfm.dwFetchSize) ? ++ false : true; ++ } ++} ++ ++u8 get_text_mode_character_per_line(struct AstRVAS *pAstRVAS, u16 wScreenWidth) ++{ ++ u8 byCharPerLine = 0x00; ++ u8 byCharWidth = 0; ++ u8 byVGASR1 = readb(pAstRVAS->grce_reg_base + GRCE_SEQ + 0x1); ++ ++ byCharWidth = (byVGASR1 & 0x1) ? 8 : 9; ++ byCharPerLine = wScreenWidth / byCharWidth; ++ ++ return byCharPerLine; ++} ++ ++u16 get_text_mode_fetch_lines(struct AstRVAS *pAstRVAS, u16 wScreenHeight) ++{ ++ u8 byVGACR9 = readb(pAstRVAS->grce_reg_base + GRCE_CRTC + 0x9); ++ u8 byFontHeight = (byVGACR9 & 0x1F) + 1; ++ u16 wFetchLines; ++ ++ wFetchLines = wScreenHeight / byFontHeight; ++ ++ return wFetchLines; ++} ++ ++// ++// HELPER Functions ++// ++ ++void prepare_bse_descriptor(struct Descriptor *pDAddress, phys_addr_t source_addr, ++ phys_addr_t dest_addr, bool bNotLastEntry, ++ u16 wStride, u8 bytesPerPixel, ++ u32 dwFetchWidthPixels, u32 dwFetchHeight, ++ bool bInterrupt) ++{ ++ u16 wDestinationStride; ++ ++ // initialize to 0 ++ pDAddress->dw0General = 0x00; ++ pDAddress->dw1FetchWidthLine = 0x00; ++ pDAddress->dw2SourceAddr = 0x00; ++ pDAddress->dw3DestinationAddr = 0x00; ++ ++ wDestinationStride = dwFetchWidthPixels >> 3; ++ ++ // initialize to 0 ++ pDAddress->dw0General = ((wStride * bytesPerPixel) << 16) ++ | (wDestinationStride << 8) | (bNotLastEntry << 1) | bInterrupt; ++ pDAddress->dw1FetchWidthLine = ((dwFetchHeight - 1) << 16) ++ | (dwFetchWidthPixels * bytesPerPixel - 1); ++ pDAddress->dw2SourceAddr = (u32)source_addr & 0xfffffffc; ++ pDAddress->dw3DestinationAddr = (u32)dest_addr & 0xfffffffc; ++ ++ HW_ENG_DBG("After SETTING BSE Descriptor\n"); ++ HW_ENG_DBG("u32 0: 0x%x\n", pDAddress->dw0General); ++ HW_ENG_DBG("u32 1: 0x%x\n", pDAddress->dw1FetchWidthLine); ++ HW_ENG_DBG("u32 2: 0x%x\n", pDAddress->dw2SourceAddr); ++ HW_ENG_DBG("u32 3: 0x%x\n", pDAddress->dw3DestinationAddr); ++} ++ ++//for descriptor chaining ++void prepare_bse_descriptor_2(struct Descriptor *pDAddress, phys_addr_t source_addr, ++ phys_addr_t dest_addr, bool bNotLastEntry, ++ u16 wStride, u8 bytesPerPixel, ++ u32 dwFetchWidthPixels, u32 dwFetchHeight, ++ bool bInterrupt, u8 byBuckSizeRegIndex) ++{ ++ u16 wDestinationStride; ++ ++ // initialize to 0 ++ pDAddress->dw0General = 0x00; ++ pDAddress->dw1FetchWidthLine = 0x00; ++ pDAddress->dw2SourceAddr = 0x00; ++ pDAddress->dw3DestinationAddr = 0x00; ++ ++ wDestinationStride = dwFetchWidthPixels >> 3; ++ ++ // initialize to 0 ++ pDAddress->dw0General = ((wStride * bytesPerPixel) << 16) ++ | (wDestinationStride << 8) ++ | (byBuckSizeRegIndex << BSE_BUCK_SZ_INDEX_POS) ++ | (bNotLastEntry << 1) | bInterrupt; ++ pDAddress->dw1FetchWidthLine = ((dwFetchHeight - 1) << 16) ++ | (dwFetchWidthPixels * bytesPerPixel - 1); ++ pDAddress->dw2SourceAddr = (u32)source_addr & 0xfffffffc; ++ pDAddress->dw3DestinationAddr = (u32)dest_addr 
& 0xfffffffc; ++ ++ HW_ENG_DBG("AFter SETTING BSE Descriptor\n"); ++ HW_ENG_DBG("u32 0: 0x%x\n", pDAddress->dw0General); ++ HW_ENG_DBG("u32 1: 0x%x\n", pDAddress->dw1FetchWidthLine); ++ HW_ENG_DBG("u32 2: 0x%x\n", pDAddress->dw2SourceAddr); ++ HW_ENG_DBG("u32 3: 0x%x\n", pDAddress->dw3DestinationAddr); ++} ++ ++struct BSEAggregateRegister set_up_bse_bucket_2(struct AstRVAS *pAstRVAS, u8 *abyBitIndexes, ++ u8 byTotalBucketCount, u8 byBSBytesPerPixel, ++ u32 dwFetchWidthPixels, u32 dwFetchHeight, ++ u32 dwBucketSizeIndex) ++{ ++ struct BSEAggregateRegister aBSEAR = { 0 }; ++ void __iomem *addrBSDBS = 0; ++ void __iomem *addrBSCR = pAstRVAS->fg_reg_base + BSE_Command_Register; ++ ++ if (dwBucketSizeIndex >= BSE_MAX_BUCKET_SIZE_REGS) { ++ dev_err(pAstRVAS->pdev, "Video::BSE bucket size index %d too big!", ++ dwBucketSizeIndex); ++ return aBSEAR; ++ } ++ ++ addrBSDBS = pAstRVAS->fg_reg_base + BSE_REG_BASE + dwBucketSizeRegOffset[dwBucketSizeIndex]; ++ ++ // initialize ++ memset((void *)&aBSEAR, 0x00, sizeof(struct BSEAggregateRegister)); ++ aBSEAR = setUp_bse_bucket(abyBitIndexes, byTotalBucketCount, ++ byBSBytesPerPixel, dwFetchWidthPixels, dwFetchHeight); ++ ++ writel(aBSEAR.dwBSDBS, addrBSDBS); ++ aBSEAR.dwBSCR |= readl(addrBSCR) & (BSE_ENABLE_MULT_BUCKET_SZS); ++ HW_ENG_DBG("BSE Bucket size register index %d, [%#x], readback 0x%x\n", ++ dwBucketSizeIndex, aBSEAR.dwBSDBS, readl(addrBSCR)); ++ ++ return aBSEAR; ++} ++ ++struct BSEAggregateRegister setUp_bse_bucket(u8 *abyBitIndexes, u8 byTotalBucketCount, ++ u8 byBSBytesPerPixel, u32 dwFetchWidthPixels, ++ u32 dwFetchHeight) ++{ ++ struct BSEAggregateRegister aBSEAR; ++ u32 dwSrcBucketSize = MAX_LMEM_BUCKET_SIZE; ++ u32 dwDestBucketSize = dwFetchWidthPixels * dwFetchHeight >> 3; //each bucket size ++ u8 byRegisterPosition = 0; ++ u8 cBucket; ++ ++ // initialize ++ memset((void *)&aBSEAR, 0x00, sizeof(struct BSEAggregateRegister)); ++ ++ for (cBucket = 0; cBucket < byTotalBucketCount; cBucket++) { ++ if (cBucket < 6) { ++ HW_ENG_DBG("BUCKET: 0x%x, Bit Position: 0x%x\n", cBucket, ++ abyBitIndexes[cBucket]); ++ HW_ENG_DBG("BSBPS0 Position: 0x%x\n", byRegisterPosition); ++ aBSEAR.adwBSBPS[0] |= abyBitIndexes[cBucket] ++ << byRegisterPosition; ++ ++ byRegisterPosition += 5; ++ } else if (cBucket >= 6 && cBucket < 12) { ++ if (cBucket == 6) ++ byRegisterPosition = 0; ++ ++ HW_ENG_DBG("BUCKET: 0x%x, Bit Position: 0x%x\n", cBucket, ++ abyBitIndexes[cBucket]); ++ HW_ENG_DBG("BSBPS1 Position: 0x%x\n", byRegisterPosition); ++ aBSEAR.adwBSBPS[1] |= abyBitIndexes[cBucket] ++ << byRegisterPosition; ++ byRegisterPosition += 5; ++ } else { ++ if (cBucket == 12) ++ byRegisterPosition = 0; ++ ++ HW_ENG_DBG("BUCKET: 0x%x, Bit Position: 0x%x\n", cBucket, ++ abyBitIndexes[cBucket]); ++ HW_ENG_DBG("BSBPS2 Position: 0x%x\n", byRegisterPosition); ++ aBSEAR.adwBSBPS[2] |= abyBitIndexes[cBucket] ++ << byRegisterPosition; ++ byRegisterPosition += 5; ++ } ++ } ++ ++ aBSEAR.dwBSCR = (((byTotalBucketCount - 1) << 8) ++ | ((byBSBytesPerPixel - 1) << 4) | (0x0 << 3) ++ | (0x1 << 1) | 0x1) & BSCMD_MASK; ++ aBSEAR.dwBSDBS = ((dwSrcBucketSize << 24) | dwDestBucketSize) ++ & 0xfcfffffc; ++ ++ HW_ENG_DBG("dwFetchWidthPixels [%#x], dwFetchHeight [%#x]\n", ++ dwFetchWidthPixels, dwFetchHeight); ++ HW_ENG_DBG("BSE Destination Bucket Size [%#x]\n", dwDestBucketSize); ++ HW_ENG_DBG("BSE Control [%#x]\n", aBSEAR.dwBSCR); ++ HW_ENG_DBG("BSE BSDBS [%#x]\n", aBSEAR.dwBSDBS); ++ HW_ENG_DBG("BSE BSBPS0 [%#x]\n", aBSEAR.adwBSBPS[0]); ++ HW_ENG_DBG("BSE BSBPS1 [%#x]\n", aBSEAR.adwBSBPS[1]); 
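++ /*
++  * Packing summary for the registers dumped here (derived from the loop
++  * above, not from a datasheet): each bucket bit-index occupies 5 bits,
++  * so buckets 0-5 fill adwBSBPS[0], 6-11 fill adwBSBPS[1] and buckets 12
++  * and up fill adwBSBPS[2]; dwBSDBS holds the LMEM source bucket size in
++  * bits [31:24] and the destination bucket size ((width * height) >> 3)
++  * in the low bits, masked with 0xfcfffffc.
++  */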
++ HW_ENG_DBG("BSE BSBPS2 [%#x]\n", aBSEAR.adwBSBPS[2]); ++ ++ return aBSEAR; ++} ++ ++void prepare_tfe_descriptor(struct Descriptor *pDAddress, phys_addr_t source_addr, ++ phys_addr_t dest_addr, bool bNotLastEntry, u8 bCheckSum, ++ bool bEnabledRLE, u16 wStride, u8 bytesPerPixel, ++ u32 dwFetchWidthPixels, u32 dwFetchHeight, ++ enum SelectedByteMode sbm, bool bRLEOverFLow, ++ bool bInterrupt) ++{ ++ enum SkipByteMode skipBM = NoByteSkip; ++ enum DataProccessMode dpm = NormalTileMode; ++ enum StartBytePosition sbp = StartFromByte0; ++ ++ HW_ENG_DBG("BEFORE SETTING TFE Descriptor\n"); ++ // initialize to 0 ++ pDAddress->dw0General = 0x00; ++ pDAddress->dw1FetchWidthLine = 0x00; ++ pDAddress->dw2SourceAddr = 0x00; ++ pDAddress->dw3DestinationAddr = 0x00; ++ ++ if (dwFetchHeight & 0x3) ++ dwFetchHeight = ((dwFetchHeight + 3) >> 2) << 2; ++ ++ switch (sbm) { ++ case AllBytesMode: ++ break; ++ ++ case LowByteMode: ++ dpm = SplitByteMode; ++ if (bytesPerPixel == 2) ++ skipBM = SkipOneByte; ++ else if (bytesPerPixel == 3) ++ skipBM = SkipTwoByte; ++ else if (bytesPerPixel == 4) ++ skipBM = SkipThreeByte; ++ break; ++ ++ case MiddleByteMode: ++ dpm = SplitByteMode; ++ if (bytesPerPixel == 2) { ++ skipBM = SkipOneByte; ++ sbp = StartFromByte1; ++ } else if (bytesPerPixel == 3) { ++ skipBM = SkipTwoByte; ++ sbp = StartFromByte1; ++ } else if (bytesPerPixel == 4) { ++ skipBM = SkipThreeByte; ++ sbp = StartFromByte1; ++ } ++ break; ++ ++ case TopByteMode: ++ dpm = SplitByteMode; ++ if (bytesPerPixel == 2) { ++ skipBM = SkipOneByte; ++ sbp = StartFromByte1; ++ } else if (bytesPerPixel == 3) { ++ skipBM = SkipTwoByte; ++ sbp = StartFromByte2; ++ } else if (bytesPerPixel == 4) { ++ skipBM = SkipThreeByte; ++ sbp = StartFromByte2; ++ } ++ break; ++ ++ case PlanarToPackedMode: ++ dpm = FourBitPlanarMode; ++ break; ++ ++ case PackedToPackedMode: ++ dpm = FourBitPackedMode; ++ break; ++ ++ default: ++ break; ++ } ++ ++ if (dwFetchWidthPixels > wStride) ++ wStride = dwFetchWidthPixels; ++ ++ pDAddress->dw0General = ((wStride * bytesPerPixel) << 16) | (dpm << 13) ++ | (sbp << 10) | (skipBM << 8) | (bRLEOverFLow << 7) ++ | (bCheckSum << 5) | (bEnabledRLE << 4) | (bNotLastEntry << 1) ++ | bInterrupt; ++ pDAddress->dw1FetchWidthLine = ((dwFetchHeight - 1) << 16) ++ | (dwFetchWidthPixels * bytesPerPixel - 1); ++ pDAddress->dw2SourceAddr = (u32)source_addr & 0xfffffffc; ++ pDAddress->dw3DestinationAddr = (u32)dest_addr & 0xfffffffc; ++ ++ HW_ENG_DBG("After SETTING TFE Descriptor\n"); ++ HW_ENG_DBG("u32 0: 0x%x\n", pDAddress->dw0General); ++ HW_ENG_DBG("u32 1: 0x%x\n", pDAddress->dw1FetchWidthLine); ++ HW_ENG_DBG("u32 2: 0x%x\n", pDAddress->dw2SourceAddr); ++ HW_ENG_DBG("u32 3: 0x%x\n", pDAddress->dw3DestinationAddr); ++} ++ ++void prepare_tfe_text_descriptor(struct Descriptor *pDAddress, phys_addr_t source_addr, ++ phys_addr_t dest_addr, bool bEnabledRLE, u32 dwFetchWidth, ++ u32 dwFetchHeight, enum DataProccessMode dpm, ++ bool bRLEOverFLow, bool bInterrupt) ++{ ++ // initialize to 0 ++ pDAddress->dw0General = 0x00; ++ pDAddress->dw1FetchWidthLine = 0x00; ++ pDAddress->dw2SourceAddr = 0x00; ++ pDAddress->dw3DestinationAddr = 0x00; ++ ++ if (dwFetchHeight & 0x3) ++ dwFetchHeight = ((dwFetchHeight + 3) >> 2) << 2; ++ ++ pDAddress->dw0General = (dwFetchWidth << 16) | (dpm << 13) ++ | (bRLEOverFLow << 7) | (1 << 5) | (bEnabledRLE << 4) ++ | bInterrupt; ++ pDAddress->dw1FetchWidthLine = ((dwFetchHeight - 1) << 16) ++ | (dwFetchWidth - 1); ++ pDAddress->dw2SourceAddr = (u32)source_addr & 0xfffffffc; ++ 
pDAddress->dw3DestinationAddr = (u32)dest_addr & 0xfffffffc; ++ ++ HW_ENG_DBG("u32 0: 0x%x\n", pDAddress->dw0General); ++ HW_ENG_DBG("u32 1: 0x%x\n", pDAddress->dw1FetchWidthLine); ++ HW_ENG_DBG("u32 2: 0x%x\n", pDAddress->dw2SourceAddr); ++ HW_ENG_DBG("u32 3: 0x%x\n", pDAddress->dw3DestinationAddr); ++} ++ ++void on_fetch_mode_13_data(struct AstRVAS *pAstRVAS, struct RvasIoctl *ri, bool bRLEOn) ++{ ++ struct Descriptor *pDescriptorAdd; ++ struct Descriptor *pDescriptorAddPhys; ++ phys_addr_t source_addr = get_phy_fb_start_address(pAstRVAS); ++ phys_addr_t dest_addr; ++ bool bRLEOverFlow = false; ++ bool bNotLastEntry = false; ++ bool bInterrupt = 1; ++ u32 dwFetchHeight = MODE13_HEIGHT; ++ u32 dwFetchWidth = MODE13_WIDTH; ++ phys_addr_t data_phys = 0; ++ phys_addr_t data_phys_rle = 0; ++ u32 dwCtrlRegValue = 0x55AA0080; ++ void *desc_virt = NULL; ++ phys_addr_t desc_phy = 0; ++ struct ContextTable *ctx_entry = NULL; ++ ++ HW_ENG_DBG("Start, bRLEOn: %d\n", bRLEOn); ++ ++ ctx_entry = get_context_entry(ri->rc, pAstRVAS); ++ ++ if (ctx_entry) { ++ desc_virt = ctx_entry->desc_virt; ++ desc_phy = ctx_entry->desc_phy; ++ } else { ++ pr_err("Mode 13: Failed to get context\n"); ++ ri->rs = InvalidContextHandle; ++ return; ++ } ++ ++ ri->tfm.dwFetchSize = MODE13_HEIGHT * MODE13_WIDTH; ++ ++ data_phys = get_phys_add_rsvd_mem((u32)ri->rmh, pAstRVAS); ++ data_phys_rle = get_phys_add_rsvd_mem((u32)ri->rmh1, pAstRVAS); ++ ++ if (!data_phys || !data_phys_rle) { ++ ri->rs = InvalidMemoryHandle; ++ dev_err(pAstRVAS->pdev, "Fetch Text: Invalid Memoryhandle\n"); ++ return; ++ } ++ if (!data_phys || (bRLEOn && !data_phys_rle)) { ++ pr_err("Mode 13: Invalid memory handle\n"); ++ ri->rs = InvalidMemoryHandle; ++ return; ++ } ++ ++ pDescriptorAdd = desc_virt; ++ pDescriptorAddPhys = (struct Descriptor *)desc_phy; ++ ++ HW_ENG_DBG("\n===========MODE 13 FETCHED DATA===========\n"); ++ ++ // Enable TFE ++ dwCtrlRegValue |= 1; ++ dwCtrlRegValue &= TFCTL_DESCRIPTOR_IN_DDR_MASK; ++ dest_addr = data_phys; ++ prepare_tfe_descriptor(pDescriptorAdd, source_addr, ++ dest_addr, ++ false, //is last entry ++ 1, //checksum ++ false, //No RLE ++ dwFetchWidth, ++ 1, //bytes per pixel ++ dwFetchWidth, dwFetchHeight, ++ PackedToPackedMode, bRLEOverFlow, ++ 1); ++ ++ dwCtrlRegValue |= 1 << 1; // enabled IRQ ++ ++ if (sleep_on_tfe_busy(pAstRVAS, (phys_addr_t)pDescriptorAddPhys, // Descriptor physical Address ++ dwCtrlRegValue, // control register value ++ ri->tfm.dwFetchSize, // bandwidth limitor value ++ &ri->tfm.dwFetchRLESize, // out:: rle size ++ &ri->tfm.dwCheckSum) == false) { // out:: cs size ++ ri->rs = GenericError; ++ return; ++ } ++ ++ // RLE enabled ++ if (bRLEOn) { ++ bRLEOverFlow = true; ++ dwCtrlRegValue = 1; ++ dwCtrlRegValue |= (ri->tfm.byRLETripletCode << 24) ++ | (ri->tfm.byRLERepeatCode << 16); ++ source_addr = data_phys; ++ dest_addr = data_phys_rle; ++ HW_ENG_DBG("RLE is on\n"); ++ ++ prepare_tfe_descriptor(pDescriptorAdd, source_addr, ++ dest_addr, ++ bNotLastEntry, //not last entry? ++ 1, //checksum ++ bRLEOn, //RLE? 
++ dwFetchWidth, 1, dwFetchWidth, dwFetchHeight, ++ AllBytesMode, bRLEOverFlow, bInterrupt); ++ ++ dwCtrlRegValue |= 1 << 1; // enabled IRQ ++ ++ if (sleep_on_tfe_busy(pAstRVAS, (phys_addr_t)pDescriptorAddPhys, // Descriptor physical Address ++ dwCtrlRegValue, // control register value ++ ri->tfm.dwFetchSize, // bandwidth limitor value ++ &ri->tfm.dwFetchRLESize, // out:: rle size ++ &ri->tfm.dwCheckSum) == false) { // out:: cs size ++ ri->rs = GenericError; ++ return; ++ } //sleeponTFEBusy ++ } ++ ++ if (bRLEOn) ++ ri->tfm.bRLEFailed = ++ (ri->tfm.dwFetchRLESize < ri->tfm.dwFetchSize) ? ++ false : true; ++} ++ ++void ioctl_fetch_mode_13_data(struct RvasIoctl *ri, struct AstRVAS *pAstRVAS) ++{ ++ bool bRLEOn = ri->tfm.bEnableRLE; ++ ++ ri->rs = SuccessStatus; ++ ++ // first time fetch ++ on_fetch_mode_13_data(pAstRVAS, ri, bRLEOn); ++ ++ if (ri->rs != SuccessStatus) ++ return; ++ ++ //if RLE fail. need to TFE without RLE to first buffer ++ if (ri->tfm.bEnableRLE & ri->tfm.bRLEFailed) { ++ bRLEOn = false; ++ on_fetch_mode_13_data(pAstRVAS, ri, bRLEOn); ++ } ++} ++ ++// Enable Snoop Interrupts and TSE, Disable FIQ ++static void enable_tse_interrupt(struct AstRVAS *pAstRVAS) ++{ ++ u32 reg_val = 0; ++ void __iomem *reg_addr = pAstRVAS->fg_reg_base ++ + TSE_SnoopCommand_Register_Offset; ++ ++ reg_val = readl(reg_addr); ++ reg_val |= SNOOP_IRQ_MASK; ++ reg_val &= ~SNOOP_FIQ_MASK; ++ ++ HW_ENG_DBG("Enabled TSE Interrupts[%#X]\n", reg_val); ++ writel(reg_val, reg_addr); ++ pAstRVAS->tse_tsicr = TSE_INTR_COUNT; ++ reg_addr = pAstRVAS->fg_reg_base ++ + TSE_TileSnoop_Interrupt_Count; ++ //set max wait time before interrupt ++ writel(pAstRVAS->tse_tsicr, reg_addr); ++} ++ ++//disable tse interrupt ++static void disable_tse_interrupt(struct AstRVAS *pAstRVAS) ++{ ++ u32 reg_val = 0; ++ void __iomem *reg_addr = pAstRVAS->fg_reg_base + TSE_SnoopCommand_Register_Offset; ++ ++ // Disable Snoop Interrupts and TSE, Disable FIQ ++ reg_val = readl(reg_addr); ++ HW_ENG_DBG("disable interrupt\n"); ++ reg_val &= ~(SNOOP_IRQ_MASK | SNOOP_FIQ_MASK); ++ writel(reg_val, reg_addr); ++} ++ ++static void enable_grce_interrupt(struct AstRVAS *pAstRVAS) ++{ ++ u32 reg_val = 0; ++ void __iomem *reg_addr = pAstRVAS->grce_reg_base + GRCE_CTL0; ++ ++ reg_val = readl(reg_addr); ++ reg_val |= GRC_IRQ_MASK; ++ writel(reg_val, reg_addr); ++ HW_ENG_DBG("Enabled GRC Interrupts[%#X]\n", reg_val); ++} ++ ++//enable all interrupts ++void enable_grce_tse_interrupt(struct AstRVAS *pAstRVAS) ++{ ++ enable_grce_interrupt(pAstRVAS); ++ enable_tse_interrupt(pAstRVAS); ++} ++ ++void disable_grce_tse_interrupt(struct AstRVAS *pAstRVAS) ++{ ++ u32 reg_val = 0; ++ ++ HW_ENG_DBG("disable_interrupts- grce_reg_base: %p GRCE_CTL0: %#x\n", ++ pAstRVAS->grce_reg_base, GRCE_CTL0); ++ reg_val = readl(pAstRVAS->grce_reg_base + GRCE_CTL0); ++ writel(reg_val & (~GRC_IRQ_MASK), pAstRVAS->grce_reg_base + GRCE_CTL0); ++ disable_tse_interrupt(pAstRVAS); ++} ++ ++u32 clear_tse_interrupt(struct AstRVAS *pAstRVAS) ++{ ++ u32 tse_sts = 0; ++ u32 tse_tile_status = 0; ++ u32 tse_snoop_ctrl = 0; ++ void __iomem *tse_ctrl_addr = pAstRVAS->fg_reg_base + TSE_SnoopCommand_Register_Offset; ++ ++ HW_ENG_DBG("clear tse inerrupt"); ++ tse_sts = readl(pAstRVAS->fg_reg_base + TSE_Status_Register_Offset); ++ tse_snoop_ctrl = readl(pAstRVAS->fg_reg_base + TSE_SnoopCommand_Register_Offset); ++ ++ if (tse_sts & (TSSTS_TC_SCREEN0 | TSSTS_TC_SCREEN1)) { ++ if (tse_sts & TSSTS_TC_SCREEN0) { ++ HW_ENG_DBG("Snoop** Update Screen 0\n"); ++ // clear interrupt and switch to screen 1 ++ 
tse_snoop_ctrl |= TSCMD_SCREEN_OWNER; ++ writel(tse_sts, pAstRVAS->fg_reg_base + TSE_Status_Register_Offset); ++ writel(tse_snoop_ctrl, tse_ctrl_addr); ++ ++ } else if (tse_sts & TSSTS_TC_SCREEN1) { ++ HW_ENG_DBG("Snoop** Update Screen 1\n"); ++ tse_snoop_ctrl &= ~TSCMD_SCREEN_OWNER; // snap shutter ++ // clear status ++ writel(tse_sts, pAstRVAS->fg_reg_base + TSE_Status_Register_Offset); ++ // clear interrupt and switch to screen 1 ++ writel(tse_snoop_ctrl, tse_ctrl_addr); ++ } ++ // read clear interrupt ++ tse_tile_status = readl(pAstRVAS->fg_reg_base ++ + TSE_TileCount_Register_Offset); ++ ++ if (tse_sts & TSSTS_FIFO_OVFL) { ++ //need to send full frame ++ dev_err(pAstRVAS->pdev, "TSE snoop fifo overflow\n"); ++ writel(TSSTS_FIFO_OVFL, pAstRVAS->fg_reg_base + TSE_Status_Register_Offset); ++ memset((void *)pAstRVAS->accrued_sm, 0xff, sizeof(pAstRVAS->accrued_sm)); ++ memset((void *)&pAstRVAS->accrued_sa, 0xff, ++ sizeof(pAstRVAS->accrued_sa)); ++ } else { ++ get_snoop_map_data(pAstRVAS); ++ } ++ } ++ return tse_sts; ++} ++ ++// LDMA interrupt ++bool clear_ldma_interrupt(struct AstRVAS *pAstRVAS) ++{ ++ u32 ldma_sts = 0; ++ ++ ldma_sts = readl(pAstRVAS->fg_reg_base + LDMA_Status_Register); ++ ++ if (ldma_sts & 0x02) { ++ //HW_ENG_DBG("Got a LDMA interrupt\n"); ++ // write 1 to clear the interrupt ++ writel(0x2, pAstRVAS->fg_reg_base + LDMA_Status_Register); ++ return true; ++ } ++ return false; ++} ++ ++bool clear_tfe_interrupt(struct AstRVAS *pAstRVAS) ++{ ++ u32 tfe_sts = 0; ++ ++ tfe_sts = readl(pAstRVAS->fg_reg_base + TFE_Status_Register); ++ ++ if (tfe_sts & 0x02) { ++ // HW_ENG_DBG("Debug: TFSTS Interrupt is triggered\n"); ++ writel(0x2, pAstRVAS->fg_reg_base + TFE_Status_Register); ++ return true; ++ } ++ return false; ++} ++ ++bool clear_bse_interrupt(struct AstRVAS *pAstRVAS) ++{ ++ u32 bse_sts = 0; ++ ++ bse_sts = readl(pAstRVAS->fg_reg_base + BSE_Status_Register); ++ ++ if (bse_sts & 0x02) { ++ writel(0x2, pAstRVAS->fg_reg_base + BSE_Status_Register); ++ return true; ++ } ++ return false; ++} ++ ++void setup_lmem(struct AstRVAS *pAstRVAS) ++{ ++ writel(0x0, pAstRVAS->fg_reg_base + LMEM_BASE_REG_3); ++ writel(0x2000, pAstRVAS->fg_reg_base + LMEM_LIMIT_REG_3); ++ writel(0x9c89c8, pAstRVAS->fg_reg_base + LMEM11_P0); ++ writel(0x9c89c8, pAstRVAS->fg_reg_base + LMEM12_P0); ++ writel(0xf3cf3c, pAstRVAS->fg_reg_base + LMEM11_P1); ++ writel(0x067201, pAstRVAS->fg_reg_base + LMEM11_P2); ++ writel(0x00F3CF3C, pAstRVAS->fg_reg_base + LMEM10_P1); ++ writel(0x00067201, pAstRVAS->fg_reg_base + LMEM10_P2); ++} ++ ++bool host_suspended(struct AstRVAS *pAstRVAS) ++{ ++ u32 GRCE18 = readl(pAstRVAS->grce_reg_base + GRCE_ATTR_VGAIR0_OFFSET); ++ ++ // VGAER is GRCE19 ++ // VGAER bit[0]:0 - vga disabled (host suspended) ++ // 1 - vga enabled ++ HW_ENG_DBG("GRCE18:%#x\n", GRCE18); ++ if (GRCE18 & 0x100) ++ return false; ++ else ++ return true; ++} ++ +diff --git a/drivers/soc/aspeed/rvas/hardware_engines.h b/drivers/soc/aspeed/rvas/hardware_engines.h +--- a/drivers/soc/aspeed/rvas/hardware_engines.h 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/soc/aspeed/rvas/hardware_engines.h 2025-12-23 10:16:21.127032619 +0000 +@@ -0,0 +1,550 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * This file is part of the ASPEED Linux Device Driver for ASPEED Baseboard Management Controller. ++ * Refer to the README file included with this package for driver version and adapter compatibility. ++ * ++ * Copyright (C) 2019-2021 ASPEED Technology Inc. All rights reserved. 
++ */ ++ ++#ifndef __HARDWAREENGINES_H__ ++#define __HARDWAREENGINES_H__ ++ ++#include ++#include ++#include "video_ioctl.h" ++ ++#define MAX_NUM_CONTEXT (8) ++#define MAX_NUM_MEM_TBL (24)//each context has 3 ++ ++#define MAX_DESC_SIZE (PAGE_SIZE) // (0x400) ++ ++#define ENGINE_TIMEOUT_IN_SECONDS (3) ++#define TFE_TIMEOUT_IN_MS (750) ++#define DESCRIPTOR_SIZE (16) ++#define TILE_SIZE (32) ++#define MAX_LMEM_BUCKET_SIZE (0x80) ++ ++#define EIGHT_BYTE_ALIGNMENT_MASK (0xfffffff7) ++#define SIXTEEN_BYTE_ALIGNMENT_MASK (0x8) ++#define TFCTL_DESCRIPTOR_IN_DDR_MASK (0xffffff7f) ++#define BSCMD_MASK (0xffff0f37) ++ ++#define TEXT_MODE_BUFFER_ALIGNMENT (16) ++#define MODE_13_CHAR_WIDTH (32) ++#define BSE_MEMORY_ACCESS_MASK (0x00ffffff) ++#define MEM_TABLE_SIZE_INCR (8) ++#define MEMORY_TABLE_GROW_INCR (8) ++ ++#define MAX_TEXT_DATA_SIZE (8192) ++ ++// For 2700 ++//#define SCU200_System_Reset_Control_Register (0x200) ++#define SCU204_System_Reset_Control_Clear_Register (0x204) ++#define SCU240_Clock_Stop_Control_Register (0x240) ++#define SCU244_Clock_Stop_Control_Clear_Register (0x244) ++//#define SCU500_Hardware_Strap1_Register (0x500) ++//TO DO local monitor on off ++//single node - vga and dp ++//dual node- node 0- vga only, node 1- dp only ++#define SCU000_Silicon_Revision_ID (0x0) ++#define SCU448_Pin_Ctrl (0x448) ++//#define SCU0C0_Misc1_Ctrl (0x0C0) ++//#define SCU0D0_Misc3_Ctrl (0x0D0) ++ //SCU448 IO ++#define VGAVS_ENBL_27 (0x70000000) ++#define VGAHS_ENBL_27 (0x7000000) ++//SCU0C0 ++#define VGA0_CRT_DISBL BIT(1) ++#define VGA1_CRT_DISBL BIT(2) ++//SCU0D0 IO ++#define VGA0_PWR_OFF_VDAC BIT(2) ++#define VGA1_PWR_OFF_VDAC BIT(3) ++ ++#define SCU_RVAS1_ENGINE_BIT BIT(10) ++#define SCU_RVAS0_ENGINE_BIT BIT(9) ++#define SCU_RVAS1_STOP_CLOCK_BIT BIT(28) ++#define SCU_RVAS0_STOP_CLOCK_BIT BIT(25) ++ ++// For 2600 ++//SCU ++#define SCU000_Protection_Key_Register (0x000) ++#define SCU040_Module_Reset_Control_Register_Set_1 (0x040) ++#define SCU044_Module_Reset_Control_Clear_Register_1 (0x044) ++#define SCU080_Clock_Stop_Control_Register_Set_1 (0x080) ++#define SCU084_Clock_Stop_Control_Clear_Register (0x084) ++#define SCU500_Hardware_Strap1_Register (0x500) ++#define SCU418_Pin_Ctrl (0x418) ++#define SCU0C0_Misc1_Ctrl (0x0C0) ++#define SCU0D0_Misc3_Ctrl (0x0D0) ++//SCU418 ++#define VGAVS_ENBL BIT(31) ++#define VGAHS_ENBL BIT(30) ++//SCU0C0 ++#define VGA_CRT_DISBL BIT(6) ++//SCU0D0 ++#define PWR_OFF_VDAC BIT(3) ++ ++#define SCU_UNLOCK_PWD (0x1688A8A8) ++#define SCU_RVAS_ENGINE_BIT BIT(9) ++#define SCU_RVAS_STOP_CLOCK_BIT BIT(25) ++// ++//MCR -edac ++#define MCR_CONF 0x04 /* configuration register */ ++ ++//DP ++#define DPTX_Configuration_Register (0x100) ++#define DPTX_PHY_Configuration_Register (0x104) ++//DPTX100 ++#define AUX_RESETN (24) ++//DPTX104 ++#define DP_TX_I_MAIN_ON (8) ++ ++//TOP REG ++#define TOP_REG_OFFSET (0x0) ++#define TOP_REG_CTL (TOP_REG_OFFSET + 0x00) ++#define TOP_REG_STS (TOP_REG_OFFSET + 0x04) ++#define LMEM_BASE_REG_3 (TOP_REG_OFFSET + 0x2c) ++#define LMEM_LIMIT_REG_3 (TOP_REG_OFFSET + 0x3c) ++#define LMEM11_P0 (TOP_REG_OFFSET + 0x4c) ++#define LMEM12_P0 (TOP_REG_OFFSET + 0x50) ++#define LMEM10_P1 (TOP_REG_OFFSET + 0x80) ++#define LMEM11_P1 (TOP_REG_OFFSET + 0x84) ++#define LMEM10_P2 (TOP_REG_OFFSET + 0xA0) ++#define LMEM11_P2 (TOP_REG_OFFSET + 0xA4) ++ ++#define TSE_SnoopCommand_Register_Offset (0x0400) ++#define TSE_TileCount_Register_Offset (0x0418) ++#define TSE_Status_Register_Offset (0x0404) ++#define TSE_CS0Reg (0x0408) ++#define TSE_CS1Reg (0x040c) ++#define 
TSE_RS0Reg (0x0410) ++#define TSE_RS1Reg (0x0414) ++#define TSE_TileSnoop_Interrupt_Count (0x0420) ++#define TSE_FrameBuffer_Offset (0x041c) ++#define TSE_UpperLimit_Offset (0x0424) ++#define TSE_SnoopMap_Offset (0x0600) ++ ++#define TFE_Descriptor_Table_Offset (0x0108) ++#define TFE_Descriptor_Control_Resgister (0x0100) ++#define TFE_Status_Register (0x0104) ++#define TFE_RLE_CheckSum (0x010C) ++#define TFE_RLE_Byte_Count (0x0110) ++#define TFE_RLE_LIMITOR (0x0114) ++ ++#define BSE_REG_BASE (0x0200) ++#define BSE_Command_Register (0x0200) ++#define BSE_Status_Register (0x0204) ++#define BSE_Descriptor_Table_Base_Register (0x0208) ++#define BSE_Destination_Buket_Size_Resgister (0x020c) ++#define BSE_Bit_Position_Register_0 (0x0210) ++#define BSE_Bit_Position_Register_1 (0x0214) ++#define BSE_Bit_Position_Register_2 (0x0218) ++#define BSE_LMEM_Temp_Buffer_Offset (0x0000) ++#define BSE_ENABLE_MULT_BUCKET_SZS BIT(12) ++#define BSE_BUCK_SZ_INDEX_POS (4) ++#define BSE_MAX_BUCKET_SIZE_REGS (16) ++#define BSE_BIT_MASK_Register_Offset (0x54) ++ ++#define LDMA_Control_Register (0x0300) ++#define LDMA_Status_Register (0x0304) ++#define LDMA_Descriptor_Table_Base_Register (0x0308) ++#define LDMA_CheckSum_Register (0x030c) ++#define LDMA_LMEM_Descriptor_Offset (0x4000) ++ ++//Shadow ++#define GRCE_SIZE (0x800) ++#define GRCE_ATTR_OFFSET (0x0) ++#define GRCE_ATTR_VGAIR0_OFFSET (0x18) ++#define GRCE_SEQ_OFFSET (0x20) ++#define GRCE_GCTL_OFFSET (0x30) ++#define GRCE_GRCCTL0_OFFSET (0x58) ++#define GRCE_GRCSTS_OFFSET (0x5c) ++#define GRCE_CRTC_OFFSET (0x60) ++#define GRCE_CRTCEXT_OFFSET (0x80) ++#define GRCE_XCURCTL_OFFSET (0xc8) ++#define GRCE_PAL_OFFSET (0x400) ++//size ++#define GRCELT_RAM_SIZE (0x400) ++#define GRCE_XCURCOL_SIZE (0x40) ++#define GRCE_XCURCTL_SIZE (0x40) ++#define GRCE_CRTC_SIZE (0x40) ++#define GRCE_CRTCEXT_SIZE (0x8) ++#define GRCE_SEQ_SIZE (0x8) ++#define GRCE_GCTL_SIZE (0x8) ++#define GRCE_ATTR_SIZE (0x20) ++ ++#define GRCELT_RAM (GRCE_PAL_OFFSET) ++#define GRCE_XCURCTL (GRCE_XCURCTL_OFFSET) ++#define GRCE_CRTC (GRCE_CRTC_OFFSET) ++#define GRCE_CRTCEXT (GRCE_CRTCEXT_OFFSET) ++#define GRCE_SEQ (GRCE_SEQ_OFFSET) ++#define GRCE_GCTL (GRCE_GCTL_OFFSET) ++#define GRCE_CTL0 (GRCE_GRCCTL0_OFFSET) ++#define GRCE_STATUS_REGISTER (GRCE_GRCSTS_OFFSET) ++#define GRCE_ATTR (GRCE_ATTR_OFFSET) ++#define AST_VIDEO_SCRATCH_34C (0x8c) ++#define AST_VIDEO_SCRATCH_350 (0x90) ++#define AST_VIDEO_SCRATCH_354 (0x94) ++#define MODE_GET_INFO_DE (0xA8) ++ ++//GRC interrupt ++#define GRC_FIQ_MASK (0x000003ff) ++#define GRC_IRQ_MASK (0x000003ff) ++#define GRC_INT_STS_MASK (0x000003ff) ++#define GRCSTS_XCUR_POS BIT(9) ++#define GRCSTS_XCUR_DDR BIT(8) ++#define GRCSTS_XCUR_CTL BIT(7) ++#define GRCSTS_PLT_RAM BIT(6) ++#define GRCSTS_XCRTC BIT(5) ++#define GRCSTS_CRTC BIT(4) ++#define GRCSTS_GCTL BIT(3) ++#define GRCSTS_SEQ BIT(2) ++#define GRCSTS_ATTR1 BIT(1) ++#define GRCSTS_ATTR0 BIT(0) ++#define SNOOP_RESTART (GRCSTS_XCUR_CTL | GRCSTS_XCRTC | GRCSTS_CRTC | GRCSTS_GCTL) ++ ++//snoop TSE ++#define SNOOP_TSE_MASK (0x00000001) ++#define SNOOP_IRQ_MASK (0x00000100) ++#define SNOOP_FIQ_MASK (0x00000200) ++#define TSCMD_SCREEN_OWNER BIT(15) ++#define TSCMD_PITCH_BIT (16) ++#define TSCMD_INT_ENBL_BIT (8) ++#define TSCMD_CPT_BIT (6) ++#define TSCMD_RPT_BIT (4) ++#define TSCMD_BPP_BIT (2) ++#define TSCMD_VGA_MODE_BIT (1) ++#define TSCMD_TSE_ENBL_BIT (0) ++#define TSSTS_FIFO_OVFL BIT(5) ++#define TSSTS_FONT BIT(4) ++#define TSSTS_ATTR BIT(3) ++#define TSSTS_ASCII BIT(2) ++#define TSSTS_TC_SCREEN1 BIT(1) ++#define 
TSSTS_TC_SCREEN0 BIT(0) ++#define TSSTS_ALL (0x3f) ++ ++#define TSE_INTR_COUNT (0xCB700) //50MHz clock ~1/60 sec ++//#define TSE_INTR_COUNT (0x196E00) //50MHz clock ~1/30 sec ++#define TIMER_INTR_COUNT (0x65000) // 25MHz clock ~1/60 sec ++ ++#ifdef CONFIG_MACH_ASPEED_G6 ++//Timer ++/* Register byte offsets */ ++// AST2600 Timer registers ++#define TIMER_STATUS_BIT(x) (1 << ((x) - 1)) ++ ++#define OFFSET_TIMER1 0x00 /* * timer 1 offset */ ++#define OFFSET_TIMER2 0x10 /* * timer 2 offset */ ++#define OFFSET_TIMER3 0x20 /* * timer 3 offset */ ++#define OFFSET_TIMER4 0x40 /* * timer 4 offset */ ++#define OFFSET_TIMER5 0x50 /* * timer 5 offset */ ++#define OFFSET_TIMER6 0x60 /* * timer 6 offset */ ++#define OFFSET_TIMER7 0x70 /* * timer 7 offset */ ++#define OFFSET_TIMER8 0x80 /* * timer 8 offset */ ++ ++#define OFF_TIMER_REG_CURR_CNT 0x00 ++#define OFF_TIMER_REG_LOAD_CNT 0x04 ++#define OFF_TIMER_REG_EO0 0x08 /* Read to clear interrupt */ ++#define OFF_TIMER_REG_EOI 0x0c /* Read to clear interrupt */ ++#define OFF_TIMER_REG_STAT 0x10 /* Timer Interrupt Status */ ++#define OFF_TIMER_REG_CONTROL 0x30 /* Control Register */ ++#define OFF_TIMER_REG_STATUS 0x34 /* Status Register */ ++#define OFF_TIMER_REG_CLEAR_CONTROL 0x3C /* Control Register */ ++#define RB_OFF_TIMERS_STAT 0xA0 /* * timers status offset */ ++ ++#define CTRL_TIMER1 (0) ++#define CTRL_TIMER2 (4) ++#define CTRL_TIMER3 (8) ++#define CTRL_TIMER4 (12) ++#define CTRL_TIMER5 (16) ++#define CTRL_TIMER6 (20) ++#define CTRL_TIMER7 (24) ++#define CTRL_TIMER8 (28) ++#define BIT_TIMER_ENBL BIT(0) ++#define BIT_TIMER_CLK_SEL BIT(1) ++#define BIT_INTERRUPT_ENBL BIT(2) ++#define BIT_TIMER_STAT BIT(0) ++#endif ++ ++#define SNOOP_MAP_QWORD_COUNT (64) ++#define BSE_UPPER_LIMIT (0x900000) //(0x540000) ++#define FULL_BUCKETS_COUNT (16) ++#define MODE13_HEIGHT (200) ++#define MODE13_WIDTH (320) ++ ++#define NUM_SNOOP_ROWS (64) ++ ++//vga memory information ++#define DDR_SIZE_CONFIG_BITS (0x3) ++#define VGA_MEM_SIZE_CONFIG_BITS (0x3) ++#define DDR_BASE_27 (0x400000000) ++#define DDR_BASE (0x80000000) ++ ++//grce ++#define VGACR0_REG (0x60) ++#define VGACR9F_REG (0x9F) ++ ++//display out ++#define VGA_OUT BIT(0) ++#define DP_OUT BIT(1) ++ ++struct ContextTable { ++ struct inode *pin; ++ struct file *pf; ++ struct SnoopAggregate sa; ++ u64 aqwSnoopMap[NUM_SNOOP_ROWS]; ++ void *rc; ++ struct EventMap emEventWaiting; ++ struct EventMap emEventReceived; ++ u32 dwEventWaitInMs; ++ void *desc_virt; ++ phys_addr_t desc_phy; ++}; ++ ++struct MemoryMapTable { ++ struct file *pf; ++ void *pvVirtualAddr; ++ dma_addr_t mem_phys; ++ u32 dwLength; ++ u8 byDmaAlloc; ++ u8 byReserved[3]; ++}; ++ ++union EmDwordUnion { ++ struct EventMap em; ++ u32 dw; ++}; ++ ++struct Descriptor { ++ u32 dw0General; ++ u32 dw1FetchWidthLine; ++ u32 dw2SourceAddr; ++ u32 dw3DestinationAddr; ++}; ++ ++struct BSEAggregateRegister { ++ u32 dwBSCR; ++ u32 dwBSDBS; ++ u32 adwBSBPS[3]; ++}; ++ ++enum SkipByteMode { ++ NoByteSkip = 0, SkipOneByte = 1, SkipTwoByte = 2, SkipThreeByte = 3 ++}; ++ ++enum StartBytePosition { ++ StartFromByte0 = 0, ++ StartFromByte1 = 1, ++ StartFromByte2 = 2, ++ StartFromByte3 = 3 ++}; ++ ++struct VGAMemInfo { ++ u32 dwVGASize; ++ u32 dwDRAMSize; ++ phys_addr_t qwFBPhysStart; ++}; ++ ++struct VideoDataBufferInfo { ++ u32 dwSize; ++ phys_addr_t dwPhys; ++ phys_addr_t dwVirt; ++}; ++ ++enum ColorMode { ++ MODE_EGA = 0x0, //4bpp eg. 
mode 12/6A ++ MODE_VGA = 0x1, //mode 13 ++ MODE_BPP15 = 0x2, ++ MODE_BPP16 = 0x3, ++ MODE_BPP32 = 0x4, ++ MODE_TEXT = 0xE, ++ MODE_CGA = 0xF ++}; ++ ++struct ModeInfo { ++ u8 byColorMode; ++ u8 byRefreshRateIndex; ++ u8 byModeID; ++ u8 byScanLines; ++}; ++ ++struct NewModeInfoHeader { ++ u8 byReserved; ++ u8 byDisplayInfo; ++ u8 byColorDepth; ++ u8 byMhzPixelClock; ++}; ++ ++struct DisplayEnd { ++ u16 HDE; ++ u16 VDE; ++}; ++ ++struct Resolution { ++ u16 wWidth; ++ u16 wHeight; ++}; ++ ++struct Video_OsSleepStruct { ++ wait_queue_head_t queue; ++ struct timer_list tim; ++ u8 Timeout; ++}; ++ ++struct EngineInfo { ++ struct semaphore sem; ++ struct Video_OsSleepStruct wait; ++ u8 finished; ++}; ++ ++struct VideoMem { ++ dma_addr_t phy; ++ void *pVirt; ++ u32 size; ++}; ++ ++struct VideoEngineMem { ++ struct VideoMem captureBuf0; ++ struct VideoMem captureBuf1; ++ struct VideoMem jpegTable; ++}; ++ ++struct aspeed_rvas_config { ++ u8 version; ++ const u32 *dram_table; ++}; ++ ++struct AstRVAS { ++ struct miscdevice rvas_dev; ++ struct aspeed_rvas_config *config; ++ void *pdev; ++ int irq_fge; //FrameGrabber IRQ number ++ int irq_vga; // VGA IRQ number ++ int irq_video; ++ void __iomem *fg_reg_base; ++ void __iomem *grce_reg_base; ++ void __iomem *video_reg_base; ++ struct regmap *scu; ++ struct regmap *scu_io; ++ struct reset_control *rvas_reset; ++ struct reset_control *video_engine_reset; ++ struct VGAMemInfo FBInfo; ++ u64 accrued_sm[SNOOP_MAP_QWORD_COUNT]; ++ struct SnoopAggregate accrued_sa; ++ struct VideoGeometry current_vg; ++ u32 snoop_stride; ++ u32 tse_tsicr; ++ struct EngineInfo tfe_engine; ++ struct EngineInfo bse_engine; ++ struct EngineInfo ldma_engine; ++ struct EngineInfo video_engine; ++ struct semaphore mem_sem; ++ struct semaphore context_sem; ++ struct Video_OsSleepStruct video_wait; ++ u8 video_intr_occurred; ++ u8 timer_irq_requested; ++ u8 display_out; ++ u8 rvas_index; ++ struct ContextTable *ppctContextTable[MAX_NUM_CONTEXT]; ++ u32 dwMemoryTableSize; ++ u32 dwScreenOffset; ++ struct MemoryMapTable *ppmmtMemoryTable[MAX_NUM_MEM_TBL]; ++ struct completion video_compression_complete; ++ struct completion video_capture_complete; ++ struct clk *vclk; ++ struct clk *eclk; ++ struct clk *rvasclk; ++ void __iomem *dp_base; ++ u32 sequence; ++ struct VideoEngineMem vem; ++ u8 veClkOn; ++}; ++ ++// ++// IOCTL functions ++// ++void ioctl_get_video_geometry(struct RvasIoctl *ri, struct AstRVAS *ast_rvas); ++void ioctl_wait_for_video_event(struct RvasIoctl *ri, struct AstRVAS *ast_rvas); ++void ioctl_get_grc_register(struct RvasIoctl *ri, struct AstRVAS *ast_rvas); ++void ioctl_read_snoop_map(struct RvasIoctl *ri, struct AstRVAS *ast_rvas); ++void ioctl_read_snoop_aggregate(struct RvasIoctl *ri, struct AstRVAS *ast_rvas); ++void ioctl_set_tse_tsicr(struct RvasIoctl *ri, struct AstRVAS *ast_rvas); ++void ioctl_get_tse_tsicr(struct RvasIoctl *ri, struct AstRVAS *ast_rvas); ++void ioctl_reset_video_engine(struct RvasIoctl *ri, struct AstRVAS *ast_rvas); ++ ++//vidoe fetch functions ++void ioctl_fetch_video_tiles(struct RvasIoctl *ri, struct AstRVAS *ast_rvas); ++void ioctl_fetch_video_slices(struct RvasIoctl *ri, struct AstRVAS *ast_rvas); ++void ioctl_run_length_encode_data(struct RvasIoctl *ri, struct AstRVAS *ast_rvas); ++void ioctl_fetch_text_data(struct RvasIoctl *ri, struct AstRVAS *ast_rvas); ++void ioctl_fetch_mode_13_data(struct RvasIoctl *ri, struct AstRVAS *ast_rvas); ++phys_addr_t get_phy_fb_start_address(struct AstRVAS *ast_rvas); ++bool video_geometry_change(struct 
AstRVAS *ast_rvas, u32 dwGRCEStatus); ++void update_video_geometry(struct AstRVAS *ast_rvas); ++ ++//interrupts ++void enable_grce_tse_interrupt(struct AstRVAS *ast_rvas); ++void disable_grce_tse_interrupt(struct AstRVAS *ast_rvas); ++u32 clear_tse_interrupt(struct AstRVAS *ast_rvas); ++bool clear_ldma_interrupt(struct AstRVAS *ast_rvas); ++bool clear_tfe_interrupt(struct AstRVAS *ast_rvas); ++bool clear_bse_interrupt(struct AstRVAS *ast_rvas); ++u32 get_screen_offset(struct AstRVAS *ast_rvas); ++// ++void setup_lmem(struct AstRVAS *ast_rvas); ++// ++// helper functions ++// ++ ++struct BSEAggregateRegister setUp_bse_bucket(u8 *abyBitIndexes, u8 byTotalBucketCount, ++ u8 byBSBytesPerPixel, u32 dwFetchWidthPixels, ++ u32 dwFetchHeight); ++void prepare_bse_descriptor(struct Descriptor *pDAddress, phys_addr_t source_addr, ++ phys_addr_t dest_addr, bool bNotLastEntry, ++ u16 wStride, u8 bytesPerPixel, ++ u32 dwFetchWidthPixels, u32 dwFetchHeight, ++ bool bInterrupt); ++ ++void prepare_tfe_descriptor(struct Descriptor *pDAddress, phys_addr_t source_addr, ++ phys_addr_t dest_addr, bool bNotLastEntry, u8 bCheckSum, ++ bool bEnabledRLE, u16 wStride, u8 bytesPerPixel, ++ u32 dwFetchWidthPixels, u32 dwFetchHeight, ++ enum SelectedByteMode sbm, bool bRLEOverFLow, ++ bool bInterrupt); ++void prepare_tfe_text_descriptor(struct Descriptor *pDAddress, phys_addr_t source_addr, ++ phys_addr_t dest_addr, bool bEnabledRLE, u32 dwFetchWidth, ++ u32 dwFetchHeight, enum DataProccessMode dpm, ++ bool bRLEOverFLow, bool bInterrupt); ++void prepare_ldma_descriptor(struct Descriptor *pDAddress, phys_addr_t source_addr, ++ phys_addr_t dest_addr, u32 dwLDMASize, u8 byNotLastEntry); ++ ++u8 get_text_mode_character_per_line(struct AstRVAS *ast_rvas, u16 wScreenWidth); ++u16 get_text_mode_fetch_lines(struct AstRVAS *ast_rvas, u16 wScreenHeight); ++void on_fetch_text_data(struct RvasIoctl *ri, bool bRLEOn, struct AstRVAS *ast_rvas); ++ ++void reset_snoop_engine(struct AstRVAS *ast_rvas); ++void set_snoop_engine(bool b_geom_chg, struct AstRVAS *ast_rvas); ++u64 reinterpret_32bpp_snoop_row_as_24bpp(u64 theSnoopRow); ++ ++void convert_snoop_map(struct AstRVAS *ast_rvas); ++void update_all_snoop_context(struct AstRVAS *ast_rvas); ++void get_snoop_map_data(struct AstRVAS *ast_rvas); ++void get_snoop_aggregate(struct AstRVAS *ast_rvas); ++ ++void sleep_on_ldma_busy(struct AstRVAS *ast_rvas, phys_addr_t desc_addr_phys); ++bool sleep_on_tfe_busy(struct AstRVAS *ast_rvas, phys_addr_t desc_addr_phys, ++ u32 dwTFEControlR, u32 dwTFERleLimitor, u32 *pdwRLESize, ++ u32 *pdwCheckSum); ++ ++bool sleep_on_tfe_text_busy(struct AstRVAS *ast_rvas, phys_addr_t desc_addr_phys, ++ u32 dwTFEControlR, u32 dwTFERleLimitor, u32 *pdwRLESize, ++ u32 *pdwCheckSum); ++ ++bool sleep_on_bse_busy(struct AstRVAS *ast_rvas, phys_addr_t desc_addr_phys, ++ struct BSEAggregateRegister aBSEAR, u32 size); ++ ++void enable_grce_tse_interrupt(struct AstRVAS *ast_rvas); ++void disable_grce_tse_interrupt(struct AstRVAS *ast_rvas); ++ ++bool host_suspended(struct AstRVAS *pAstRVAS); ++#endif // __HARDWAREENGINES_H__ +diff --git a/drivers/soc/aspeed/rvas/video.h b/drivers/soc/aspeed/rvas/video.h +--- a/drivers/soc/aspeed/rvas/video.h 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/soc/aspeed/rvas/video.h 2025-12-23 10:16:21.127032619 +0000 +@@ -0,0 +1,41 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/****************************************************************************** ++ * video.h ++ * ++ * This file is part of the ASPEED Linux Device Driver for ASPEED 
Baseboard Management Controller. ++ * Refer to the README file included with this package for driver version and adapter compatibility. ++ * ++ * Copyright (C) 2019-2021 ASPEED Technology Inc. All rights reserved. ++ */ ++ ++#ifndef __RVAS_VIDEO_H__ ++#define __RVAS_VIDEO_H__ ++ ++#define RVAS_DRIVER_NAME "rvas" ++#define Stringify(x) #x ++ ++// ++//functions ++// ++void ioctl_new_context(struct file *file, struct RvasIoctl *pri, struct AstRVAS *pAstRVAS); ++void ioctl_delete_context(struct RvasIoctl *pri, struct AstRVAS *pAstRVAS); ++void ioctl_alloc(struct file *file, struct RvasIoctl *pri, struct AstRVAS *pAstRVAS); ++void ioctl_free(struct RvasIoctl *pri, struct AstRVAS *pAstRVAS); ++void ioctl_update_lms(u8 lms_on, struct AstRVAS *ast_rvas); ++void ioctl_update_lms_2700(u8 lms_on, struct AstRVAS *ast_rvas); ++u32 ioctl_get_lm_status(struct AstRVAS *ast_rvas); ++u32 ioctl_get_lm_status_2700(struct AstRVAS *ast_rvas); ++ ++//void* get_from_rsvd_mem(u32 size, u32 *phys_add, struct AstRVAS *pAstRVAS); ++void *get_virt_add_rsvd_mem(u32 index, struct AstRVAS *pAstRVAS); ++dma_addr_t get_phys_add_rsvd_mem(u32 index, struct AstRVAS *pAstRVAS); ++u32 get_len_rsvd_mem(u32 index, struct AstRVAS *pAstRVAS); ++ ++//int release_rsvd_mem(u32 size, u32 phys_add); ++bool virt_is_valid_rsvd_mem(u32 index, u32 size, struct AstRVAS *pAstRVAS); ++ ++struct ContextTable *get_new_context_table_entry(struct AstRVAS *pAstRVAS); ++struct ContextTable *get_context_entry(const void *crc, struct AstRVAS *pAstRVAS); ++bool remove_context_table_entry(const void *crmh, struct AstRVAS *pAstRVAS); ++ ++#endif // __RVAS_VIDEO_H__ +diff --git a/drivers/soc/aspeed/rvas/video_debug.h b/drivers/soc/aspeed/rvas/video_debug.h +--- a/drivers/soc/aspeed/rvas/video_debug.h 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/soc/aspeed/rvas/video_debug.h 2025-12-23 10:16:21.127032619 +0000 +@@ -0,0 +1,35 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Copyright (C) 2019-2021 ASPEED Technology Inc. ++ */ ++ ++#ifndef AST_VIDEO_DEBUG_H_ ++#define AST_VIDEO_DEBUG_H_ ++ ++#include ++#include ++#include ++ ++//#define RVAS_VIDEO_DEBUG ++//#define VIDEO_ENGINE_DEBUG ++//#define HARDWARE_ENGINE_DEBUG ++ ++#ifdef RVAS_VIDEO_DEBUG ++#define VIDEO_DBG(fmt, args...) ({ dev_printk(KERNEL_INFO, pAstRVAS->pdev, "%s() " fmt, __func__, ## args); }) ++#else ++#define VIDEO_DBG(fmt, args...) do { (void)(fmt); (void)(args); } while (0) ++#endif // RVAS_VIDEO_DEBUG ++ ++#ifdef VIDEO_ENGINE_DEBUG ++#define VIDEO_ENG_DBG(fmt, args...) ({ dev_printk(KERNEL_INFO, pAstRVAS->pdev, "%s() " fmt, __func__, ## args); }) ++#else ++#define VIDEO_ENG_DBG(fmt, args...) do { (void)(fmt); (void)(args); } while (0) ++#endif // RVAS_VIDEO_DEBUG ++ ++#ifdef HARDWARE_ENGINE_DEBUG ++#define HW_ENG_DBG(fmt, args...) ({ dev_printk(KERNEL_INFO, pAstRVAS->pdev, "%s() " fmt, __func__, ## args); }) ++#else ++#define HW_ENG_DBG(fmt, args...) do { (void)(fmt); (void)(args); } while (0) ++#endif // RVAS_VIDEO_DEBUG ++ ++#endif // AST_VIDEO_DEBUG_H_ +diff --git a/drivers/soc/aspeed/rvas/video_engine.c b/drivers/soc/aspeed/rvas/video_engine.c +--- a/drivers/soc/aspeed/rvas/video_engine.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/soc/aspeed/rvas/video_engine.c 2025-12-23 10:16:21.127032619 +0000 +@@ -0,0 +1,1338 @@ ++// SPDX-License-Identifier: GPL-2.0+ ++/* ++ * File Name : video_engines.c ++ * Description : AST2600 video engines ++ * ++ * Copyright (C) 2019-2021 ASPEED Technology Inc. All rights reserved. 
++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ */ ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "video_ioctl.h" ++#include "video_engine.h" ++#include "video_debug.h" ++#include "hardware_engines.h" ++ ++// ++//functions ++// ++static inline void video_write(struct AstRVAS *pAstRVAS, u32 val, u32 reg); ++static inline u32 video_read(struct AstRVAS *pAstRVAS, u32 reg); ++ ++static u32 get_vga_mem_base(struct AstRVAS *pAstRVAS); ++static int reserve_video_engine_memory(struct AstRVAS *pAstRVAS); ++static void init_jpeg_table(struct AstRVAS *pAstRVAS); ++static void video_set_scaling(struct AstRVAS *pAstRVAS); ++static int video_capture_trigger(struct AstRVAS *pAstRVAS); ++static void dump_buffer(phys_addr_t qwPhyStreamAddress, u32 size); ++ ++// ++// function definitions ++// ++/** ++ * _make_addr - make address fit for ast2700 ++ * @addr: dma address for hardware to work ++ * ++ * Return: 32bit format of address ++ */ ++static inline u32 _make_addr(dma_addr_t addr) ++{ ++#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT ++ // In ast2700, it store higt byte[35:32] in low byte[3:0] ++ return (addr >> 32) | (u32)(addr); ++#else ++ return addr; ++#endif ++} ++ ++void ioctl_get_video_engine_config(struct VideoConfig *pVideoConfig, struct AstRVAS *pAstRVAS) ++{ ++ u32 VR004_SeqCtrl = video_read(pAstRVAS, AST_VIDEO_SEQ_CTRL); ++ u32 VR060_ComCtrl = video_read(pAstRVAS, AST_VIDEO_COMPRESS_CTRL); ++ ++ // status ++ pVideoConfig->rs = SuccessStatus; ++ ++ pVideoConfig->engine = 0; // engine = 1 is Video Management ++ pVideoConfig->capture_format = 0; ++ pVideoConfig->compression_mode = 0; ++ ++ pVideoConfig->compression_format = (VR004_SeqCtrl >> 13) & 0x1; ++ pVideoConfig->YUV420_mode = (VR004_SeqCtrl >> 10) & 0x3; ++ pVideoConfig->AutoMode = (VR004_SeqCtrl >> 5) & 0x1; ++ ++ pVideoConfig->rc4_enable = (VR060_ComCtrl >> 5) & 0x1; ++ pVideoConfig->Visual_Lossless = (VR060_ComCtrl >> 16) & 0x1; ++ pVideoConfig->Y_JPEGTableSelector = VIDEO_GET_DCT_LUM(VR060_ComCtrl); ++ pVideoConfig->AdvanceTableSelector = (VR060_ComCtrl >> 27) & 0xf; ++} ++ ++void ioctl_set_video_engine_config(struct VideoConfig *pVideoConfig, struct AstRVAS *pAstRVAS) ++{ ++ int i, base = 0; ++ u32 ctrl = 0; //for VR004, VR204 ++ u32 compress_ctrl = 0x00080000; ++ u32 *tlb_table = pAstRVAS->vem.jpegTable.pVirt; ++ ++ // status ++ pVideoConfig->rs = SuccessStatus; ++ ++ VIDEO_ENG_DBG("\n"); ++ ++ ctrl = video_read(pAstRVAS, AST_VIDEO_SEQ_CTRL); ++ ++ video_write(pAstRVAS, video_read(pAstRVAS, AST_VIDEO_PASS_CTRL) & ++ ~(G6_VIDEO_FRAME_CT_MASK | G6_VIDEO_MULTI_JPEG_MODE | G6_VIDEO_MULTI_JPEG_FLAG_MODE), AST_VIDEO_PASS_CTRL); ++ ++ ctrl &= ~VIDEO_AUTO_COMPRESS; ++ ctrl |= G5_VIDEO_COMPRESS_JPEG_MODE; ++ ctrl &= ~VIDEO_COMPRESS_FORMAT_MASK; //~(3<<10) bit 4 is set to 0 ++ ++ if (pVideoConfig->YUV420_mode) ++ ctrl |= VIDEO_COMPRESS_FORMAT(YUV420); ++ ++ if (pVideoConfig->rc4_enable) ++ compress_ctrl |= VIDEO_ENCRYP_ENABLE; ++ ++ switch (pVideoConfig->compression_mode) { ++ case 0: //DCT only ++ compress_ctrl |= VIDEO_DCT_ONLY_ENCODE; ++ break; ++ case 1: //DCT VQ mix 2-color ++ compress_ctrl &= ~(VIDEO_4COLOR_VQ_ENCODE | 
VIDEO_DCT_ONLY_ENCODE); ++ break; ++ case 2: //DCT VQ mix 4-color ++ compress_ctrl |= VIDEO_4COLOR_VQ_ENCODE; ++ break; ++ default: ++ dev_err(pAstRVAS->pdev, "unknown compression mode:%d\n", pVideoConfig->compression_mode); ++ break; ++ } ++ ++ if (pVideoConfig->Visual_Lossless) { ++ compress_ctrl |= VIDEO_HQ_ENABLE; ++ compress_ctrl |= VIDEO_HQ_DCT_LUM(pVideoConfig->AdvanceTableSelector); ++ compress_ctrl |= VIDEO_HQ_DCT_CHROM((pVideoConfig->AdvanceTableSelector + 16)); ++ } else { ++ compress_ctrl &= ~VIDEO_HQ_ENABLE; ++ } ++ ++ video_write(pAstRVAS, ctrl, AST_VIDEO_SEQ_CTRL); ++ // we are using chrominance quantization table instead of luminance quantization table ++ video_write(pAstRVAS, compress_ctrl | VIDEO_DCT_LUM(pVideoConfig->Y_JPEGTableSelector) | VIDEO_DCT_CHROM(pVideoConfig->Y_JPEGTableSelector + 16), AST_VIDEO_COMPRESS_CTRL); ++ VIDEO_ENG_DBG("VR04: %#X\n", video_read(pAstRVAS, AST_VIDEO_SEQ_CTRL)); ++ VIDEO_ENG_DBG("VR60: %#X\n", video_read(pAstRVAS, AST_VIDEO_COMPRESS_CTRL)); ++ ++ // chose a table for JPEG or multi-JPEG ++ if (pVideoConfig->compression_format >= 1) { ++ VIDEO_ENG_DBG("Choose a JPEG Table\n"); ++ for (i = 0; i < 12; i++) { ++ base = (1024 * i); ++ //base = (256 * i); ++ if (pVideoConfig->YUV420_mode) //yuv420 ++ tlb_table[base + 46] = 0x00220103; //for YUV420 mode ++ else ++ tlb_table[base + 46] = 0x00110103; //for YUV444 mode) ++ } ++ } ++ ++ video_set_scaling(pAstRVAS); ++} ++ ++// ++void ioctl_get_video_engine_data_2700(struct MultiJpegConfig *pArrayMJConfig, struct AstRVAS *pAstRVAS, dma_addr_t dwPhyStreamAddress) ++{ ++ u32 yuv_shift; ++ u32 scan_lines; ++ int timeout = 0; ++ u32 x0; ++ u32 y0; ++ phys_addr_t start_addr; ++ u32 frame_count = 0; ++ u32 old_src_addr, new_src_addr; ++ u32 offset; ++ ++ pArrayMJConfig->rs = SuccessStatus; ++ ++ VIDEO_ENG_DBG("\n"); ++ VIDEO_ENG_DBG("before Stream buffer: %#llx\n", dwPhyStreamAddress); ++ //dump_buffer(dwPhyStreamAddress,100); ++ ++ video_write(pAstRVAS, _make_addr(dwPhyStreamAddress), AST_VIDEO_STREAM_BUFF); ++ ++ if (host_suspended(pAstRVAS)) { ++ pArrayMJConfig->rs = HostSuspended; ++ VIDEO_ENG_DBG("HostSuspended Timeout\n"); ++ return; ++ } ++ ++ VIDEO_ENG_DBG("irq status: %#x\n", video_read(pAstRVAS, AST_VIDEO_INT_STS)); ++ ++#ifdef AUTO_COMPRESS ++ if (video_cc_auto_trigger(pAstRVAS) == 0) { ++ VIDEO_ENG_DBG("auto Ccc Timeout\n"); ++ pArrayMJConfig->multi_jpeg_frames = 0; ++ pArrayMJConfig->rs = CompressionTimedOut; ++ return; ++ } ++#else ++ if (video_capture_trigger(pAstRVAS) == 0) { ++ dev_err(pAstRVAS->pdev, " capture timeout sts %x\n", video_read(pAstRVAS, AST_VIDEO_INT_STS)); ++ pArrayMJConfig->multi_jpeg_frames = 0; ++ pArrayMJConfig->rs = CaptureTimedOut; ++ return; ++ } ++ // clear all the interrupt since there is a bug in HW engine ast2700 ++ // only capture interrupt is enable, but compression intterupt is generated ++ video_write(pAstRVAS, VIDEO_CAPTURE_COMPLETE | VIDEO_COMPRESS_COMPLETE, AST_VIDEO_INT_STS); ++#endif ++ ++ // start compression setup ++ video_write(pAstRVAS, VIDEO_COMPRESS_COMPLETE, AST_VIDEO_INT_EN); ++ init_completion(&pAstRVAS->video_compression_complete); ++ ++ scan_lines = video_read(pAstRVAS, AST_VIDEO_SOURCE_SCAN_LINE); ++ frame_count = video_read(pAstRVAS, AST_VIDEO_COMPRESS_FRAME_COUNT_RB); ++ ++ // VR07C odd , get source from 0x4C ++ // VR07C even, get source from 0x44 ++ old_src_addr = (frame_count & 0x01) ? AST_VIDEO_SOURCE_BUFF0 : AST_VIDEO_SOURCE_BUFF1; ++ new_src_addr = (frame_count & 0x01) ? 
AST_VIDEO_SOURCE_BUFF1 : AST_VIDEO_SOURCE_BUFF0; ++ ++ start_addr = video_read(pAstRVAS, old_src_addr); ++ // make sure BCD is disable ++ video_write(pAstRVAS, video_read(pAstRVAS, AST_VIDEO_BCD_CTRL) & ~VIDEO_BCD_CHG_EN, AST_VIDEO_BCD_CTRL); ++ ++ if (video_read(pAstRVAS, AST_VIDEO_SEQ_CTRL) & VIDEO_COMPRESS_FORMAT(YUV420)) { ++ // YUV 420 ++ VIDEO_ENG_DBG("Debug: YUV420\n"); ++ yuv_shift = 4; ++ } else { ++ VIDEO_ENG_DBG("Debug: YUV444\n"); ++ yuv_shift = 3; ++ } ++ ++ //update compress window ++ video_write(pAstRVAS, ++ pArrayMJConfig->frame[0].wWidthPixels << 16 | ++ pArrayMJConfig->frame[0].wHeightPixels, AST_VIDEO_COMPRESS_WIN); ++ ++ x0 = pArrayMJConfig->frame[0].wXPixels; ++ y0 = pArrayMJConfig->frame[0].wYPixels; ++ ++ offset = (scan_lines * y0) + ((256 * x0) >> yuv_shift); ++ ++ video_write(pAstRVAS, start_addr + offset, new_src_addr); ++ VIDEO_ENG_DBG("write to %#x, with address: %#x", start_addr + offset, new_src_addr); ++ // trigger compression ++ VIDEO_ENG_DBG("trigger compression\n"); ++ video_write(pAstRVAS, video_read(pAstRVAS, AST_VIDEO_SEQ_CTRL) | ++ VIDEO_COMPRESS_TRIGGER, AST_VIDEO_SEQ_CTRL); ++ ++ timeout = wait_for_completion_interruptible_timeout(&pAstRVAS->video_compression_complete, HZ); ++ ++ if (timeout == 0) { ++ dev_err(pAstRVAS->pdev, " compression timeout sts %x\n", video_read(pAstRVAS, AST_VIDEO_INT_STS)); ++ pArrayMJConfig->multi_jpeg_frames = 0; ++ pArrayMJConfig->rs = CompressionTimedOut; ++ } ++ pArrayMJConfig->multi_jpeg_frames = 1; ++ pArrayMJConfig->frame[0].dwSizeInBytes = video_read(pAstRVAS, AST_VIDEO_JPEG_SIZE); ++ ++ VIDEO_ENG_DBG("compressed size: %d\n", pArrayMJConfig->frame[0].dwSizeInBytes); ++ ++ //clear ++ video_write(pAstRVAS, (video_read(pAstRVAS, AST_VIDEO_SEQ_CTRL) & ++ ~(G5_VIDEO_COMPRESS_JPEG_MODE | VIDEO_CAPTURE_MULTI_FRAME | VIDEO_COMPRESS_TRIGGER)) ++ , AST_VIDEO_SEQ_CTRL); ++ ++ pAstRVAS->sequence++; ++ if (pAstRVAS->sequence & 0x01) { ++ video_write(pAstRVAS, _make_addr(pAstRVAS->vem.captureBuf1.phy), AST_VIDEO_SOURCE_BUFF0);//44h ++ video_write(pAstRVAS, _make_addr(pAstRVAS->vem.captureBuf0.phy), AST_VIDEO_SOURCE_BUFF1);//4Ch ++ } else { ++ video_write(pAstRVAS, _make_addr(pAstRVAS->vem.captureBuf0.phy), AST_VIDEO_SOURCE_BUFF0);//44h ++ video_write(pAstRVAS, _make_addr(pAstRVAS->vem.captureBuf1.phy), AST_VIDEO_SOURCE_BUFF1);//4Ch ++ } ++ VIDEO_ENG_DBG("[%#x]: %#x\n", AST_VIDEO_SOURCE_BUFF0, video_read(pAstRVAS, AST_VIDEO_SOURCE_BUFF0)); ++ VIDEO_ENG_DBG("[%#x]: %#x\n", AST_VIDEO_SOURCE_BUFF1, video_read(pAstRVAS, AST_VIDEO_SOURCE_BUFF1)); ++ ++ //TODO: kernel dump here... 
++ //dump_buffer(dwPhyStreamAddress,100); ++} ++ ++void ioctl_get_video_engine_data(struct MultiJpegConfig *pArrayMJConfig, struct AstRVAS *pAstRVAS, phys_addr_t dwPhyStreamAddress) ++{ ++ u32 yuv_shift; ++ u32 yuv_msk; ++ u32 scan_lines; ++ int timeout = 0; ++ u32 x0; ++ u32 y0; ++ int i = 0; ++ u32 dw_w_h; ++ phys_addr_t start_addr; ++ u32 multi_jpeg_data = 0; ++ u32 VR044; ++ u32 nextFrameOffset = 0; ++ ++ pArrayMJConfig->rs = SuccessStatus; ++ ++ VIDEO_ENG_DBG("\n"); ++ VIDEO_ENG_DBG("before Stream buffer:\n"); ++ //dump_buffer(dwPhyStreamAddress,100); ++ ++ video_write(pAstRVAS, dwPhyStreamAddress, AST_VIDEO_STREAM_BUFF); ++ ++ if (host_suspended(pAstRVAS)) { ++ pArrayMJConfig->rs = HostSuspended; ++ VIDEO_ENG_DBG("HostSuspended Timeout\n"); ++ return; ++ } ++ ++ if (video_capture_trigger(pAstRVAS) == 0) { ++ pArrayMJConfig->rs = CaptureTimedOut; ++ VIDEO_ENG_DBG("Capture Timeout\n"); ++ return; ++ } ++ //dump_buffer(dwPhyStreamAddress,100); ++ // start compression setup ++ video_write(pAstRVAS, VIDEO_COMPRESS_COMPLETE, AST_VIDEO_INT_EN); ++ init_completion(&pAstRVAS->video_compression_complete); ++ VIDEO_ENG_DBG("capture complete buffer:\n"); ++ ++ //dump_buffer(vem.captureBuf0.phy,100); ++ VR044 = video_read(pAstRVAS, AST_VIDEO_SOURCE_BUFF0); ++ ++ scan_lines = video_read(pAstRVAS, AST_VIDEO_SOURCE_SCAN_LINE); ++ VIDEO_ENG_DBG("scan_lines: %#x\n", scan_lines); ++ ++ if (video_read(pAstRVAS, AST_VIDEO_SEQ_CTRL) & VIDEO_COMPRESS_FORMAT(YUV420)) { ++ // YUV 420 ++ VIDEO_ENG_DBG("Debug: YUV420\n"); ++ yuv_shift = 4; ++ yuv_msk = 0xf; ++ } else { ++ // YUV 444 ++ VIDEO_ENG_DBG("Debug: YUV444\n"); ++ yuv_shift = 3; ++ yuv_msk = 0x7; ++ } ++ ++ video_write(pAstRVAS, video_read(pAstRVAS, AST_VIDEO_PASS_CTRL) | G6_VIDEO_MULTI_JPEG_FLAG_MODE | ++ (G6_VIDEO_JPEG__COUNT(pArrayMJConfig->multi_jpeg_frames - 1) | G6_VIDEO_MULTI_JPEG_MODE), AST_VIDEO_PASS_CTRL); ++ ++ video_write(pAstRVAS, video_read(pAstRVAS, AST_VIDEO_BCD_CTRL) & ~VIDEO_BCD_CHG_EN, AST_VIDEO_BCD_CTRL); ++ ++ video_write(pAstRVAS, video_read(pAstRVAS, AST_VIDEO_CTRL) | VIDEO_CTRL_ADDRESS_MAP_MULTI_JPEG, AST_VIDEO_CTRL); ++ ++ for (i = 0; i < pArrayMJConfig->multi_jpeg_frames; i++) { ++ VIDEO_ENG_DBG("Debug: Before: [%d]: x: %#x y: %#x w: %#x h: %#x\n", i, ++ pArrayMJConfig->frame[i].wXPixels, ++ pArrayMJConfig->frame[i].wYPixels, ++ pArrayMJConfig->frame[i].wWidthPixels, ++ pArrayMJConfig->frame[i].wHeightPixels); ++ x0 = pArrayMJConfig->frame[i].wXPixels; ++ y0 = pArrayMJConfig->frame[i].wYPixels; ++ dw_w_h = SET_FRAME_W_H(pArrayMJConfig->frame[i].wWidthPixels, pArrayMJConfig->frame[i].wHeightPixels); ++ ++ start_addr = VR044 + (scan_lines * y0) + ((256 * x0) / (1 << yuv_shift)); ++ ++ VIDEO_ENG_DBG("VR%x dw_w_h: %#x, VR%x : addr : %#x, x0 %d, y0 %d\n", ++ AST_VIDEO_MULTI_JPEG_SRAM + (8 * i), dw_w_h, ++ AST_VIDEO_MULTI_JPEG_SRAM + (8 * i) + 4, start_addr, x0, y0); ++ video_write(pAstRVAS, dw_w_h, AST_VIDEO_MULTI_JPEG_SRAM + (8 * i)); ++ video_write(pAstRVAS, start_addr, AST_VIDEO_MULTI_JPEG_SRAM + (8 * i) + 4); ++ } ++ ++ video_write(pAstRVAS, video_read(pAstRVAS, AST_VIDEO_SEQ_CTRL) & ~(VIDEO_CAPTURE_TRIGGER | VIDEO_COMPRESS_FORCE_IDLE | VIDEO_COMPRESS_TRIGGER), AST_VIDEO_SEQ_CTRL); ++ ++ //set mode for multi-jpeg mode VR004[5:3] ++ video_write(pAstRVAS, (video_read(pAstRVAS, AST_VIDEO_SEQ_CTRL) & ~VIDEO_AUTO_COMPRESS) ++ | VIDEO_CAPTURE_MULTI_FRAME | G5_VIDEO_COMPRESS_JPEG_MODE, AST_VIDEO_SEQ_CTRL); ++ ++ //If CPU is too fast, pleas read back and trigger ++ video_write(pAstRVAS, video_read(pAstRVAS, AST_VIDEO_SEQ_CTRL) | 
VIDEO_COMPRESS_TRIGGER, AST_VIDEO_SEQ_CTRL); ++ VIDEO_ENG_DBG("wait_for_completion_interruptible_timeout...\n"); ++ ++ timeout = wait_for_completion_interruptible_timeout(&pAstRVAS->video_compression_complete, HZ / 2); ++ ++ if (timeout == 0) { ++ dev_err(pAstRVAS->pdev, "multi compression timeout sts %x\n", video_read(pAstRVAS, AST_VIDEO_INT_STS)); ++ pArrayMJConfig->multi_jpeg_frames = 0; ++ pArrayMJConfig->rs = CompressionTimedOut; ++ } else { ++ VIDEO_ENG_DBG("400 %x , 404 %x\n", video_read(pAstRVAS, AST_VIDEO_MULTI_JPEG_SRAM), video_read(pAstRVAS, AST_VIDEO_MULTI_JPEG_SRAM + 4)); ++ VIDEO_ENG_DBG("408 %x , 40c %x\n", video_read(pAstRVAS, AST_VIDEO_MULTI_JPEG_SRAM + 8), video_read(pAstRVAS, AST_VIDEO_MULTI_JPEG_SRAM + 0xC)); ++ VIDEO_ENG_DBG("done reading 408\n"); ++ ++ for (i = 0; i < pArrayMJConfig->multi_jpeg_frames; i++) { ++ pArrayMJConfig->frame[i].dwOffsetInBytes = nextFrameOffset; ++ ++ multi_jpeg_data = video_read(pAstRVAS, AST_VIDEO_MULTI_JPEG_SRAM + (8 * i) + 4); ++ if (multi_jpeg_data & BIT(7)) { ++ pArrayMJConfig->frame[i].dwSizeInBytes = video_read(pAstRVAS, AST_VIDEO_MULTI_JPEG_SRAM + (8 * i)) & 0xffffff; ++ nextFrameOffset = (multi_jpeg_data & ~BIT(7)) >> 1; ++ } else { ++ pArrayMJConfig->frame[i].dwSizeInBytes = 0; ++ nextFrameOffset = 0; ++ } ++ VIDEO_ENG_DBG("[%d] size %d, dwOffsetInBytes %x\n", i, pArrayMJConfig->frame[i].dwSizeInBytes, pArrayMJConfig->frame[i].dwOffsetInBytes); ++ } //for ++ } ++ ++ video_write(pAstRVAS, (video_read(pAstRVAS, AST_VIDEO_SEQ_CTRL) & ~(G5_VIDEO_COMPRESS_JPEG_MODE | VIDEO_CAPTURE_MULTI_FRAME)) ++ | VIDEO_AUTO_COMPRESS, AST_VIDEO_SEQ_CTRL); ++ video_write(pAstRVAS, video_read(pAstRVAS, AST_VIDEO_PASS_CTRL) & ++ ~(G6_VIDEO_FRAME_CT_MASK | G6_VIDEO_MULTI_JPEG_MODE), AST_VIDEO_PASS_CTRL); ++ ++ //VIDEO_ENG_DBG("after Stream buffer:\n"); ++ //dump_buffer(dwPhyStreamAddress,100); ++} ++ ++irqreturn_t ast_video_isr(int this_irq, void *dev_id) ++{ ++ u32 status; ++ u32 enabled_irq; ++ struct AstRVAS *pAstRVAS = dev_id; ++ ++ status = video_read(pAstRVAS, AST_VIDEO_INT_STS); ++ enabled_irq = video_read(pAstRVAS, AST_VIDEO_INT_EN); ++ ++ VIDEO_ENG_DBG("sts: %#x enabled: %#x\n", status, enabled_irq); ++ ++ status &= enabled_irq; ++ if (status & VIDEO_COMPRESS_COMPLETE) { ++ video_write(pAstRVAS, VIDEO_COMPRESS_COMPLETE, AST_VIDEO_INT_STS); ++ complete(&pAstRVAS->video_compression_complete); ++ } ++ if (status & VIDEO_CAPTURE_COMPLETE) { ++ video_write(pAstRVAS, VIDEO_CAPTURE_COMPLETE, AST_VIDEO_INT_STS); ++ VIDEO_ENG_DBG("capture complete\n"); ++ complete(&pAstRVAS->video_capture_complete); ++ } ++ ++ return IRQ_HANDLED; ++} ++ ++void enable_video_interrupt(struct AstRVAS *pAstRVAS) ++{ ++ u32 intCtrReg = video_read(pAstRVAS, AST_VIDEO_INT_EN); ++ ++ intCtrReg = (VIDEO_COMPRESS_COMPLETE | VIDEO_CAPTURE_COMPLETE); ++ video_write(pAstRVAS, intCtrReg, AST_VIDEO_INT_EN); ++} ++ ++void disable_video_interrupt(struct AstRVAS *pAstRVAS) ++{ ++ video_write(pAstRVAS, 0, AST_VIDEO_INT_EN); ++ video_write(pAstRVAS, 0xffffffff, AST_VIDEO_INT_STS); ++} ++ ++void video_engine_rc4Reset(struct AstRVAS *pAstRVAS) ++{ ++ //rc4 init reset .. 
++ video_write(pAstRVAS, video_read(pAstRVAS, AST_VIDEO_CTRL) | VIDEO_CTRL_RC4_RST, AST_VIDEO_CTRL); ++ video_write(pAstRVAS, video_read(pAstRVAS, AST_VIDEO_CTRL) & ~VIDEO_CTRL_RC4_RST, AST_VIDEO_CTRL); ++} ++ ++// setup functions ++int video_engine_reserveMem(struct AstRVAS *pAstRVAS) ++{ ++ int result = 0; ++ ++ // reserve mem ++ result = reserve_video_engine_memory(pAstRVAS); ++ if (result < 0) { ++ dev_err(pAstRVAS->pdev, "Error Reserving Video Engine Memory\n"); ++ return result; ++ } ++ return 0; ++} ++ ++int free_video_engine_memory(struct AstRVAS *pAstRVAS) ++{ ++ int size = pAstRVAS->vem.captureBuf0.size + pAstRVAS->vem.captureBuf1.size + pAstRVAS->vem.jpegTable.size; ++ ++ if (size && pAstRVAS->vem.captureBuf0.pVirt) { ++ dma_free_coherent(pAstRVAS->pdev, size, ++ pAstRVAS->vem.captureBuf0.pVirt, ++ pAstRVAS->vem.captureBuf0.phy); ++ } else { ++ return -1; ++ } ++ VIDEO_ENG_DBG("After dma_free_coherent\n"); ++ ++ return 0; ++} ++ ++// this function needs to be called when graphic mode change ++void video_set_Window(struct AstRVAS *pAstRVAS) ++{ ++ u32 scan_line; ++ u32 screenHeightAligned = ((pAstRVAS->current_vg.wScreenHeight + 0x1f) & (~0x1f)); ++ ++ VIDEO_ENG_DBG("\n"); ++ ++ //set direct mode ++ if (pAstRVAS->config->version == 7) { ++ video_write(pAstRVAS, video_read(pAstRVAS, AST_VIDEO_PASS_CTRL) & ~(VIDEO_AUTO_FETCH | VIDEO_DIRECT_FETCH), AST_VIDEO_PASS_CTRL); ++ video_write(pAstRVAS, video_read(pAstRVAS, AST_VIDEO_PASS_CTRL) | VIDEO_AUTO_FETCH | VIDEO_DIRECT_FETCH, AST_VIDEO_PASS_CTRL); ++ video_write(pAstRVAS, _make_addr(get_vga_mem_base(pAstRVAS)), AST_VIDEO_DIRECT_BASE); ++ video_write(pAstRVAS, VIDEO_FETCH_TIMING(0) | VIDEO_FETCH_LINE_OFFSET(pAstRVAS->current_vg.wStride * 4), AST_VIDEO_DIRECT_CTRL); ++ } ++ //compression x,y ++ video_write(pAstRVAS, VIDEO_COMPRESS_H(pAstRVAS->current_vg.wStride) | VIDEO_COMPRESS_V(screenHeightAligned), AST_VIDEO_COMPRESS_WIN); ++ VIDEO_ENG_DBG("reg offset[%#x]: %#x\n", AST_VIDEO_COMPRESS_WIN, video_read(pAstRVAS, AST_VIDEO_COMPRESS_WIN)); ++ ++ if (pAstRVAS->current_vg.wStride == 1680) ++ video_write(pAstRVAS, VIDEO_CAPTURE_H(1728) | VIDEO_CAPTURE_V(screenHeightAligned), AST_VIDEO_CAPTURE_WIN); ++ else ++ video_write(pAstRVAS, VIDEO_CAPTURE_H(pAstRVAS->current_vg.wStride) | VIDEO_CAPTURE_V(screenHeightAligned), AST_VIDEO_CAPTURE_WIN); ++ ++ VIDEO_ENG_DBG("reg offset[%#x]: %#x\n", AST_VIDEO_CAPTURE_WIN, video_read(pAstRVAS, AST_VIDEO_CAPTURE_WIN)); ++ ++ // set scan_line VR048 ++ if ((pAstRVAS->current_vg.wStride % 8) == 0) { ++ video_write(pAstRVAS, pAstRVAS->current_vg.wStride * 4, AST_VIDEO_SOURCE_SCAN_LINE); ++ } else { ++ scan_line = pAstRVAS->current_vg.wStride; ++ scan_line = scan_line + 16 - (scan_line % 16); ++ scan_line = scan_line * 4; ++ video_write(pAstRVAS, scan_line, AST_VIDEO_SOURCE_SCAN_LINE); ++ } ++} ++ ++void set_direct_mode(struct AstRVAS *pAstRVAS) ++{ ++ int Direct_Mode = 0; ++ u32 ColorDepthIndex; ++ u32 VGA_Scratch_Register_350, VGA_Scratch_Register_354, VGA_Scratch_Register_34C, Color_Depth; ++ ++ VIDEO_ENG_DBG("\n"); ++ video_write(pAstRVAS, video_read(pAstRVAS, AST_VIDEO_PASS_CTRL) & ~(VIDEO_AUTO_FETCH | VIDEO_DIRECT_FETCH), AST_VIDEO_PASS_CTRL); ++ ++ VGA_Scratch_Register_350 = video_read(pAstRVAS, AST_VIDEO_E_SCRATCH_350); ++ VGA_Scratch_Register_34C = video_read(pAstRVAS, AST_VIDEO_E_SCRATCH_34C); ++ VGA_Scratch_Register_354 = video_read(pAstRVAS, AST_VIDEO_E_SCRATCH_354); ++ ++ if (((VGA_Scratch_Register_350 & 0xff00) >> 8) == 0xA8) { ++ Color_Depth = ((VGA_Scratch_Register_350 & 0xff0000) >> 16); ++ ++ 
if (Color_Depth < 15) ++ Direct_Mode = 0; ++ else ++ Direct_Mode = 1; ++ ++ } else { //Original mode information ++ ColorDepthIndex = (VGA_Scratch_Register_34C >> 4) & 0x0F; ++ ++ if (ColorDepthIndex == 0xe || ColorDepthIndex == 0xf) { ++ Direct_Mode = 0; ++ } else { ++ if (ColorDepthIndex > 2) ++ Direct_Mode = 1; ++ else ++ Direct_Mode = 0; ++ } ++ } ++ ++ if (Direct_Mode) { ++ VIDEO_ENG_DBG("Direct Mode\n"); ++ video_write(pAstRVAS, video_read(pAstRVAS, AST_VIDEO_PASS_CTRL) | VIDEO_AUTO_FETCH | VIDEO_DIRECT_FETCH, AST_VIDEO_PASS_CTRL); ++ video_write(pAstRVAS, _make_addr(get_vga_mem_base(pAstRVAS)), AST_VIDEO_DIRECT_BASE); ++ video_write(pAstRVAS, VIDEO_FETCH_TIMING(0) | VIDEO_FETCH_LINE_OFFSET(pAstRVAS->current_vg.wStride * 4), AST_VIDEO_DIRECT_CTRL); ++ } else { ++ VIDEO_ENG_DBG("Sync None Direct Mode\n"); ++ video_write(pAstRVAS, video_read(pAstRVAS, AST_VIDEO_PASS_CTRL) & ~(VIDEO_AUTO_FETCH | VIDEO_DIRECT_FETCH), AST_VIDEO_PASS_CTRL); ++ } ++} ++ ++// return timeout 0 - timeout; non 0 is successful ++static int video_capture_trigger(struct AstRVAS *pAstRVAS) ++{ ++ int timeout = 0; ++ ++ VIDEO_ENG_DBG("\n"); ++ ++ // only enable capture interrupt ++ video_write(pAstRVAS, VIDEO_CAPTURE_COMPLETE, AST_VIDEO_INT_EN); ++ ++ init_completion(&pAstRVAS->video_capture_complete); ++ video_write(pAstRVAS, video_read(pAstRVAS, AST_VIDEO_PASS_CTRL) | ++ VIDEO_SET_CAPTURE_FORMAT(1) | VIDEO_DIRECT_FETCH, AST_VIDEO_PASS_CTRL); ++ ++ video_write(pAstRVAS, video_read(pAstRVAS, AST_VIDEO_BCD_CTRL) & ~VIDEO_BCD_CHG_EN, AST_VIDEO_BCD_CTRL); ++ video_write(pAstRVAS, video_read(pAstRVAS, AST_VIDEO_SEQ_CTRL) & ~(VIDEO_CAPTURE_TRIGGER | VIDEO_COMPRESS_FORCE_IDLE | VIDEO_COMPRESS_TRIGGER | VIDEO_AUTO_COMPRESS), AST_VIDEO_SEQ_CTRL); ++ //If CPU is too fast, pleas read back and trigger ++ video_write(pAstRVAS, video_read(pAstRVAS, AST_VIDEO_SEQ_CTRL) | G5_VIDEO_COMPRESS_JPEG_MODE | VIDEO_CAPTURE_TRIGGER, AST_VIDEO_SEQ_CTRL); ++ ++ timeout = wait_for_completion_interruptible_timeout(&pAstRVAS->video_capture_complete, HZ / 2); ++ ++ if (timeout == 0) ++ dev_err(pAstRVAS->pdev, "Capture timeout sts %x\n", video_read(pAstRVAS, AST_VIDEO_INT_STS)); ++ ++ //clear ++ video_write(pAstRVAS, video_read(pAstRVAS, AST_VIDEO_SEQ_CTRL) & ~(VIDEO_CAPTURE_TRIGGER | VIDEO_COMPRESS_FORCE_IDLE | VIDEO_COMPRESS_TRIGGER | VIDEO_AUTO_COMPRESS), AST_VIDEO_SEQ_CTRL); ++ ++ //dump_buffer(pAstRVAS->vem.captureBuf0.phy, 1024); ++ return timeout; ++} ++ ++// ++// static functions ++// ++static u32 get_vga_mem_base(struct AstRVAS *pAstRVAS) ++{ ++ u32 vga_mem_size, mem_size; ++ ++ mem_size = pAstRVAS->FBInfo.dwDRAMSize; ++ vga_mem_size = pAstRVAS->FBInfo.dwVGASize; ++ VIDEO_ENG_DBG("VGA Info : MEM Size %dMB, VGA Mem Size %dMB\n", mem_size / 1024 / 1024, vga_mem_size / 1024 / 1024); ++ return (mem_size - vga_mem_size); ++} ++ ++static void dump_buffer(phys_addr_t dwPhyStreamAddress, u32 size) ++{ ++ u32 iC; ++ u32 val = 0; ++ ++ for (iC = 0; iC < size; iC += 4) { ++ val = readl((void *)(dwPhyStreamAddress + iC)); ++ VIDEO_ENG_DBG("%#x, ", val); ++ } ++} ++ ++static void video_set_scaling(struct AstRVAS *pAstRVAS) ++{ ++ u32 ctrl = video_read(pAstRVAS, AST_VIDEO_CTRL); ++ //no scaling ++ ctrl &= ~VIDEO_CTRL_DWN_SCALING_MASK; ++ ++ VIDEO_ENG_DBG("Scaling Disable\n"); ++ video_write(pAstRVAS, 0x00200000, AST_VIDEO_SCALING0); ++ video_write(pAstRVAS, 0x00200000, AST_VIDEO_SCALING1); ++ video_write(pAstRVAS, 0x00200000, AST_VIDEO_SCALING2); ++ video_write(pAstRVAS, 0x00200000, AST_VIDEO_SCALING3); ++ ++ video_write(pAstRVAS, 0x10001000, 
AST_VIDEO_SCAL_FACTOR); ++ video_write(pAstRVAS, ctrl, AST_VIDEO_CTRL); ++ ++ video_set_Window(pAstRVAS); ++} ++ ++void video_ctrl_init(struct AstRVAS *pAstRVAS) ++{ ++ u8 inputdelay = 0x4; ++ ++ VIDEO_ENG_DBG("\n"); ++ if (pAstRVAS->config->version == 7) { ++ VIDEO_ENG_DBG("reg address: 0x%llx\n", pAstRVAS->video_reg_base); ++ /* Unlock VE registers */ ++ video_write(pAstRVAS, VIDEO_PROTECT_UNLOCK, AST_VIDEO_PROTECT); ++ inputdelay = 0x1; ++ /* Clear the offset */ ++ video_write(pAstRVAS, 0, AST_VIDEO_COMPRESS_PRO); ++ video_write(pAstRVAS, 0, AST_VIDEO_COMPRESS_READ); ++ } ++ ++ /* disable interrupts */ ++ video_write(pAstRVAS, 0, AST_VIDEO_INT_EN); ++ video_write(pAstRVAS, 0xffffffff, AST_VIDEO_INT_STS); ++ video_write(pAstRVAS, 0, AST_VIDEO_BCD_CTRL); ++ ++ /*write src addr and jped addr to register*/ ++ pAstRVAS->sequence = 1; ++ video_write(pAstRVAS, _make_addr(pAstRVAS->vem.captureBuf0.phy), AST_VIDEO_SOURCE_BUFF0);//44h ++ video_write(pAstRVAS, _make_addr(pAstRVAS->vem.captureBuf1.phy), AST_VIDEO_SOURCE_BUFF1);//4Ch ++ video_write(pAstRVAS, pAstRVAS->vem.jpegTable.phy, AST_VIDEO_JPEG_HEADER_BUFF); //40h ++ video_write(pAstRVAS, 0, AST_VIDEO_COMPRESS_READ); //3Ch ++ ++ // ============================= JPEG init =========================================== ++ init_jpeg_table(pAstRVAS); ++ VIDEO_ENG_DBG("JpegTable in Memory:0x%llx\n", pAstRVAS->vem.jpegTable.pVirt); ++ //dump_buffer(pAstRVAS->vem.jpegTable.phy, 80); ++ ++ // =================================================================================== ++ //Specification define bit 12:13 must always 0; ++ video_write(pAstRVAS, (video_read(pAstRVAS, AST_VIDEO_PASS_CTRL) & ++ ~(VIDEO_DUAL_EDGE_MODE | VIDEO_18BIT_SINGLE_EDGE)) | ++ VIDEO_DVO_INPUT_DELAY(inputdelay), ++ AST_VIDEO_PASS_CTRL); ++ ++ video_write(pAstRVAS, VIDEO_STREAM_PKT_N(STREAM_32_PKTS) | ++ VIDEO_STREAM_PKT_SIZE(STREAM_128KB), AST_VIDEO_STREAM_SIZE); ++ //rc4 init reset .. 
++ video_write(pAstRVAS, video_read(pAstRVAS, AST_VIDEO_CTRL) | VIDEO_CTRL_RC4_RST, AST_VIDEO_CTRL); ++ video_write(pAstRVAS, video_read(pAstRVAS, AST_VIDEO_CTRL) & ~VIDEO_CTRL_RC4_RST, AST_VIDEO_CTRL); ++ ++ //CRC/REDUCE_BIT register clear ++ video_write(pAstRVAS, 0, AST_VIDEO_CRC1); ++ video_write(pAstRVAS, 0, AST_VIDEO_CRC2); ++ video_write(pAstRVAS, 0, AST_VIDEO_DATA_TRUNCA); ++ video_write(pAstRVAS, 0, AST_VIDEO_COMPRESS_READ); ++} ++ ++static int reserve_video_engine_memory(struct AstRVAS *pAstRVAS) ++{ ++ u32 size; ++ dma_addr_t phys_add = 0; ++ void *virt_add = 0; ++ ++ memset(&pAstRVAS->vem, 0, sizeof(struct VideoEngineMem)); ++ pAstRVAS->vem.captureBuf0.size = VIDEO_CAPTURE_BUFFER_SIZE; //size 10M ++ pAstRVAS->vem.captureBuf1.size = VIDEO_CAPTURE_BUFFER_SIZE; //size 10M ++ pAstRVAS->vem.jpegTable.size = VIDEO_JPEG_TABLE_SIZE; //size 1M ++ ++ size = pAstRVAS->vem.captureBuf0.size + pAstRVAS->vem.captureBuf1.size + pAstRVAS->vem.jpegTable.size; ++ VIDEO_ENG_DBG("Allocating memory size: 0x%x\n", size); ++ virt_add = dma_alloc_coherent(pAstRVAS->pdev, size, &phys_add, ++ GFP_KERNEL); ++ ++ if (!virt_add) { ++ pr_err("Cannot alloc buffer for video engine\n"); ++ return -ENOMEM; ++ } ++ ++ pAstRVAS->vem.captureBuf0.phy = phys_add; ++ pAstRVAS->vem.captureBuf1.phy = phys_add + pAstRVAS->vem.captureBuf0.size; ++ pAstRVAS->vem.jpegTable.phy = phys_add + pAstRVAS->vem.captureBuf0.size + pAstRVAS->vem.captureBuf1.size; ++ ++ pAstRVAS->vem.captureBuf0.pVirt = (void *)virt_add; ++ pAstRVAS->vem.captureBuf1.pVirt = (void *)(virt_add + pAstRVAS->vem.captureBuf0.size); ++ pAstRVAS->vem.jpegTable.pVirt = (void *)(virt_add + pAstRVAS->vem.captureBuf0.size + pAstRVAS->vem.captureBuf1.size); ++ ++ VIDEO_ENG_DBG("Allocated: phys: 0x%llx\n", phys_add); ++ VIDEO_ENG_DBG("Phy: Buf0:0x%llx; Buf1:0x%llx; jpegT:0x%llx\n", pAstRVAS->vem.captureBuf0.phy, pAstRVAS->vem.captureBuf1.phy, pAstRVAS->vem.jpegTable.phy); ++ VIDEO_ENG_DBG("Virt: Buf0:0x%llx; Buf1:0x%llx; JpegT:0x%llx\n", pAstRVAS->vem.captureBuf0.pVirt, pAstRVAS->vem.captureBuf1.pVirt, pAstRVAS->vem.jpegTable.pVirt); ++ ++ return 0; ++} ++ ++/************************************************ JPEG ***************************************************************************************/ ++static void init_jpeg_table(struct AstRVAS *pAstRVAS) ++{ ++ int i = 0; ++ int base = 0; ++ u32 *tlb_table = pAstRVAS->vem.jpegTable.pVirt; ++ ++ //JPEG header default value: ++ for (i = 0; i < 12; i++) { ++ base = (256 * i); ++ tlb_table[base + 0] = 0xE0FFD8FF; ++ tlb_table[base + 1] = 0x464A1000; ++ tlb_table[base + 2] = 0x01004649; ++ tlb_table[base + 3] = 0x60000101; ++ tlb_table[base + 4] = 0x00006000; ++ tlb_table[base + 5] = 0x0F00FEFF; ++ tlb_table[base + 6] = 0x00002D05; ++ tlb_table[base + 7] = 0x00000000; ++ tlb_table[base + 8] = 0x00000000; ++ tlb_table[base + 9] = 0x00DBFF00; ++ tlb_table[base + 44] = 0x081100C0; ++ tlb_table[base + 45] = 0x00000000; ++ tlb_table[base + 47] = 0x03011102; ++ tlb_table[base + 48] = 0xC4FF0111; ++ tlb_table[base + 49] = 0x00001F00; ++ tlb_table[base + 50] = 0x01010501; ++ tlb_table[base + 51] = 0x01010101; ++ tlb_table[base + 52] = 0x00000000; ++ tlb_table[base + 53] = 0x00000000; ++ tlb_table[base + 54] = 0x04030201; ++ tlb_table[base + 55] = 0x08070605; ++ tlb_table[base + 56] = 0xFF0B0A09; ++ tlb_table[base + 57] = 0x10B500C4; ++ tlb_table[base + 58] = 0x03010200; ++ tlb_table[base + 59] = 0x03040203; ++ tlb_table[base + 60] = 0x04040505; ++ tlb_table[base + 61] = 0x7D010000; ++ tlb_table[base + 62] = 0x00030201; ++ 
tlb_table[base + 63] = 0x12051104; ++ tlb_table[base + 64] = 0x06413121; ++ tlb_table[base + 65] = 0x07615113; ++ tlb_table[base + 66] = 0x32147122; ++ tlb_table[base + 67] = 0x08A19181; ++ tlb_table[base + 68] = 0xC1B14223; ++ tlb_table[base + 69] = 0xF0D15215; ++ tlb_table[base + 70] = 0x72623324; ++ tlb_table[base + 71] = 0x160A0982; ++ tlb_table[base + 72] = 0x1A191817; ++ tlb_table[base + 73] = 0x28272625; ++ tlb_table[base + 74] = 0x35342A29; ++ tlb_table[base + 75] = 0x39383736; ++ tlb_table[base + 76] = 0x4544433A; ++ tlb_table[base + 77] = 0x49484746; ++ tlb_table[base + 78] = 0x5554534A; ++ tlb_table[base + 79] = 0x59585756; ++ tlb_table[base + 80] = 0x6564635A; ++ tlb_table[base + 81] = 0x69686766; ++ tlb_table[base + 82] = 0x7574736A; ++ tlb_table[base + 83] = 0x79787776; ++ tlb_table[base + 84] = 0x8584837A; ++ tlb_table[base + 85] = 0x89888786; ++ tlb_table[base + 86] = 0x9493928A; ++ tlb_table[base + 87] = 0x98979695; ++ tlb_table[base + 88] = 0xA3A29A99; ++ tlb_table[base + 89] = 0xA7A6A5A4; ++ tlb_table[base + 90] = 0xB2AAA9A8; ++ tlb_table[base + 91] = 0xB6B5B4B3; ++ tlb_table[base + 92] = 0xBAB9B8B7; ++ tlb_table[base + 93] = 0xC5C4C3C2; ++ tlb_table[base + 94] = 0xC9C8C7C6; ++ tlb_table[base + 95] = 0xD4D3D2CA; ++ tlb_table[base + 96] = 0xD8D7D6D5; ++ tlb_table[base + 97] = 0xE2E1DAD9; ++ tlb_table[base + 98] = 0xE6E5E4E3; ++ tlb_table[base + 99] = 0xEAE9E8E7; ++ tlb_table[base + 100] = 0xF4F3F2F1; ++ tlb_table[base + 101] = 0xF8F7F6F5; ++ tlb_table[base + 102] = 0xC4FFFAF9; ++ tlb_table[base + 103] = 0x00011F00; ++ tlb_table[base + 104] = 0x01010103; ++ tlb_table[base + 105] = 0x01010101; ++ tlb_table[base + 106] = 0x00000101; ++ tlb_table[base + 107] = 0x00000000; ++ tlb_table[base + 108] = 0x04030201; ++ tlb_table[base + 109] = 0x08070605; ++ tlb_table[base + 110] = 0xFF0B0A09; ++ tlb_table[base + 111] = 0x11B500C4; ++ tlb_table[base + 112] = 0x02010200; ++ tlb_table[base + 113] = 0x04030404; ++ tlb_table[base + 114] = 0x04040507; ++ tlb_table[base + 115] = 0x77020100; ++ tlb_table[base + 116] = 0x03020100; ++ tlb_table[base + 117] = 0x21050411; ++ tlb_table[base + 118] = 0x41120631; ++ tlb_table[base + 119] = 0x71610751; ++ tlb_table[base + 120] = 0x81322213; ++ tlb_table[base + 121] = 0x91421408; ++ tlb_table[base + 122] = 0x09C1B1A1; ++ tlb_table[base + 123] = 0xF0523323; ++ tlb_table[base + 124] = 0xD1726215; ++ tlb_table[base + 125] = 0x3424160A; ++ tlb_table[base + 126] = 0x17F125E1; ++ tlb_table[base + 127] = 0x261A1918; ++ tlb_table[base + 128] = 0x2A292827; ++ tlb_table[base + 129] = 0x38373635; ++ tlb_table[base + 130] = 0x44433A39; ++ tlb_table[base + 131] = 0x48474645; ++ tlb_table[base + 132] = 0x54534A49; ++ tlb_table[base + 133] = 0x58575655; ++ tlb_table[base + 134] = 0x64635A59; ++ tlb_table[base + 135] = 0x68676665; ++ tlb_table[base + 136] = 0x74736A69; ++ tlb_table[base + 137] = 0x78777675; ++ tlb_table[base + 138] = 0x83827A79; ++ tlb_table[base + 139] = 0x87868584; ++ tlb_table[base + 140] = 0x928A8988; ++ tlb_table[base + 141] = 0x96959493; ++ tlb_table[base + 142] = 0x9A999897; ++ tlb_table[base + 143] = 0xA5A4A3A2; ++ tlb_table[base + 144] = 0xA9A8A7A6; ++ tlb_table[base + 145] = 0xB4B3B2AA; ++ tlb_table[base + 146] = 0xB8B7B6B5; ++ tlb_table[base + 147] = 0xC3C2BAB9; ++ tlb_table[base + 148] = 0xC7C6C5C4; ++ tlb_table[base + 149] = 0xD2CAC9C8; ++ tlb_table[base + 150] = 0xD6D5D4D3; ++ tlb_table[base + 151] = 0xDAD9D8D7; ++ tlb_table[base + 152] = 0xE5E4E3E2; ++ tlb_table[base + 153] = 0xE9E8E7E6; ++ tlb_table[base + 154] = 0xF4F3F2EA; ++ 
tlb_table[base + 155] = 0xF8F7F6F5; ++ tlb_table[base + 156] = 0xDAFFFAF9; ++ tlb_table[base + 157] = 0x01030C00; ++ tlb_table[base + 158] = 0x03110200; ++ tlb_table[base + 159] = 0x003F0011; ++ ++ //Table 0 ++ if (i == 0) { ++ tlb_table[base + 10] = 0x0D140043; ++ tlb_table[base + 11] = 0x0C0F110F; ++ tlb_table[base + 12] = 0x11101114; ++ tlb_table[base + 13] = 0x17141516; ++ tlb_table[base + 14] = 0x1E20321E; ++ tlb_table[base + 15] = 0x3D1E1B1B; ++ tlb_table[base + 16] = 0x32242E2B; ++ tlb_table[base + 17] = 0x4B4C3F48; ++ tlb_table[base + 18] = 0x44463F47; ++ tlb_table[base + 19] = 0x61735A50; ++ tlb_table[base + 20] = 0x566C5550; ++ tlb_table[base + 21] = 0x88644644; ++ tlb_table[base + 22] = 0x7A766C65; ++ tlb_table[base + 23] = 0x4D808280; ++ tlb_table[base + 24] = 0x8C978D60; ++ tlb_table[base + 25] = 0x7E73967D; ++ tlb_table[base + 26] = 0xDBFF7B80; ++ tlb_table[base + 27] = 0x1F014300; ++ tlb_table[base + 28] = 0x272D2121; ++ tlb_table[base + 29] = 0x3030582D; ++ tlb_table[base + 30] = 0x697BB958; ++ tlb_table[base + 31] = 0xB8B9B97B; ++ tlb_table[base + 32] = 0xB9B8A6A6; ++ tlb_table[base + 33] = 0xB9B9B9B9; ++ tlb_table[base + 34] = 0xB9B9B9B9; ++ tlb_table[base + 35] = 0xB9B9B9B9; ++ tlb_table[base + 36] = 0xB9B9B9B9; ++ tlb_table[base + 37] = 0xB9B9B9B9; ++ tlb_table[base + 38] = 0xB9B9B9B9; ++ tlb_table[base + 39] = 0xB9B9B9B9; ++ tlb_table[base + 40] = 0xB9B9B9B9; ++ tlb_table[base + 41] = 0xB9B9B9B9; ++ tlb_table[base + 42] = 0xB9B9B9B9; ++ tlb_table[base + 43] = 0xFFB9B9B9; ++ } ++ //Table 1 ++ if (i == 1) { ++ tlb_table[base + 10] = 0x0C110043; ++ tlb_table[base + 11] = 0x0A0D0F0D; ++ tlb_table[base + 12] = 0x0F0E0F11; ++ tlb_table[base + 13] = 0x14111213; ++ tlb_table[base + 14] = 0x1A1C2B1A; ++ tlb_table[base + 15] = 0x351A1818; ++ tlb_table[base + 16] = 0x2B1F2826; ++ tlb_table[base + 17] = 0x4142373F; ++ tlb_table[base + 18] = 0x3C3D373E; ++ tlb_table[base + 19] = 0x55644E46; ++ tlb_table[base + 20] = 0x4B5F4A46; ++ tlb_table[base + 21] = 0x77573D3C; ++ tlb_table[base + 22] = 0x6B675F58; ++ tlb_table[base + 23] = 0x43707170; ++ tlb_table[base + 24] = 0x7A847B54; ++ tlb_table[base + 25] = 0x6E64836D; ++ tlb_table[base + 26] = 0xDBFF6C70; ++ tlb_table[base + 27] = 0x1B014300; ++ tlb_table[base + 28] = 0x22271D1D; ++ tlb_table[base + 29] = 0x2A2A4C27; ++ tlb_table[base + 30] = 0x5B6BA04C; ++ tlb_table[base + 31] = 0xA0A0A06B; ++ tlb_table[base + 32] = 0xA0A0A0A0; ++ tlb_table[base + 33] = 0xA0A0A0A0; ++ tlb_table[base + 34] = 0xA0A0A0A0; ++ tlb_table[base + 35] = 0xA0A0A0A0; ++ tlb_table[base + 36] = 0xA0A0A0A0; ++ tlb_table[base + 37] = 0xA0A0A0A0; ++ tlb_table[base + 38] = 0xA0A0A0A0; ++ tlb_table[base + 39] = 0xA0A0A0A0; ++ tlb_table[base + 40] = 0xA0A0A0A0; ++ tlb_table[base + 41] = 0xA0A0A0A0; ++ tlb_table[base + 42] = 0xA0A0A0A0; ++ tlb_table[base + 43] = 0xFFA0A0A0; ++ } ++ //Table 2 ++ if (i == 2) { ++ tlb_table[base + 10] = 0x090E0043; ++ tlb_table[base + 11] = 0x090A0C0A; ++ tlb_table[base + 12] = 0x0C0B0C0E; ++ tlb_table[base + 13] = 0x110E0F10; ++ tlb_table[base + 14] = 0x15172415; ++ tlb_table[base + 15] = 0x2C151313; ++ tlb_table[base + 16] = 0x241A211F; ++ tlb_table[base + 17] = 0x36372E34; ++ tlb_table[base + 18] = 0x31322E33; ++ tlb_table[base + 19] = 0x4653413A; ++ tlb_table[base + 20] = 0x3E4E3D3A; ++ tlb_table[base + 21] = 0x62483231; ++ tlb_table[base + 22] = 0x58564E49; ++ tlb_table[base + 23] = 0x385D5E5D; ++ tlb_table[base + 24] = 0x656D6645; ++ tlb_table[base + 25] = 0x5B536C5A; ++ tlb_table[base + 26] = 0xDBFF595D; ++ tlb_table[base + 27] = 
0x16014300; ++ tlb_table[base + 28] = 0x1C201818; ++ tlb_table[base + 29] = 0x22223F20; ++ tlb_table[base + 30] = 0x4B58853F; ++ tlb_table[base + 31] = 0x85858558; ++ tlb_table[base + 32] = 0x85858585; ++ tlb_table[base + 33] = 0x85858585; ++ tlb_table[base + 34] = 0x85858585; ++ tlb_table[base + 35] = 0x85858585; ++ tlb_table[base + 36] = 0x85858585; ++ tlb_table[base + 37] = 0x85858585; ++ tlb_table[base + 38] = 0x85858585; ++ tlb_table[base + 39] = 0x85858585; ++ tlb_table[base + 40] = 0x85858585; ++ tlb_table[base + 41] = 0x85858585; ++ tlb_table[base + 42] = 0x85858585; ++ tlb_table[base + 43] = 0xFF858585; ++ } ++ //Table 3 ++ if (i == 3) { ++ tlb_table[base + 10] = 0x070B0043; ++ tlb_table[base + 11] = 0x07080A08; ++ tlb_table[base + 12] = 0x0A090A0B; ++ tlb_table[base + 13] = 0x0D0B0C0C; ++ tlb_table[base + 14] = 0x11121C11; ++ tlb_table[base + 15] = 0x23110F0F; ++ tlb_table[base + 16] = 0x1C141A19; ++ tlb_table[base + 17] = 0x2B2B2429; ++ tlb_table[base + 18] = 0x27282428; ++ tlb_table[base + 19] = 0x3842332E; ++ tlb_table[base + 20] = 0x313E302E; ++ tlb_table[base + 21] = 0x4E392827; ++ tlb_table[base + 22] = 0x46443E3A; ++ tlb_table[base + 23] = 0x2C4A4A4A; ++ tlb_table[base + 24] = 0x50565137; ++ tlb_table[base + 25] = 0x48425647; ++ tlb_table[base + 26] = 0xDBFF474A; ++ tlb_table[base + 27] = 0x12014300; ++ tlb_table[base + 28] = 0x161A1313; ++ tlb_table[base + 29] = 0x1C1C331A; ++ tlb_table[base + 30] = 0x3D486C33; ++ tlb_table[base + 31] = 0x6C6C6C48; ++ tlb_table[base + 32] = 0x6C6C6C6C; ++ tlb_table[base + 33] = 0x6C6C6C6C; ++ tlb_table[base + 34] = 0x6C6C6C6C; ++ tlb_table[base + 35] = 0x6C6C6C6C; ++ tlb_table[base + 36] = 0x6C6C6C6C; ++ tlb_table[base + 37] = 0x6C6C6C6C; ++ tlb_table[base + 38] = 0x6C6C6C6C; ++ tlb_table[base + 39] = 0x6C6C6C6C; ++ tlb_table[base + 40] = 0x6C6C6C6C; ++ tlb_table[base + 41] = 0x6C6C6C6C; ++ tlb_table[base + 42] = 0x6C6C6C6C; ++ tlb_table[base + 43] = 0xFF6C6C6C; ++ } ++ //Table 4 ++ if (i == 4) { ++ tlb_table[base + 10] = 0x06090043; ++ tlb_table[base + 11] = 0x05060706; ++ tlb_table[base + 12] = 0x07070709; ++ tlb_table[base + 13] = 0x0A09090A; ++ tlb_table[base + 14] = 0x0D0E160D; ++ tlb_table[base + 15] = 0x1B0D0C0C; ++ tlb_table[base + 16] = 0x16101413; ++ tlb_table[base + 17] = 0x21221C20; ++ tlb_table[base + 18] = 0x1E1F1C20; ++ tlb_table[base + 19] = 0x2B332824; ++ tlb_table[base + 20] = 0x26302624; ++ tlb_table[base + 21] = 0x3D2D1F1E; ++ tlb_table[base + 22] = 0x3735302D; ++ tlb_table[base + 23] = 0x22393A39; ++ tlb_table[base + 24] = 0x3F443F2B; ++ tlb_table[base + 25] = 0x38334338; ++ tlb_table[base + 26] = 0xDBFF3739; ++ tlb_table[base + 27] = 0x0D014300; ++ tlb_table[base + 28] = 0x11130E0E; ++ tlb_table[base + 29] = 0x15152613; ++ tlb_table[base + 30] = 0x2D355026; ++ tlb_table[base + 31] = 0x50505035; ++ tlb_table[base + 32] = 0x50505050; ++ tlb_table[base + 33] = 0x50505050; ++ tlb_table[base + 34] = 0x50505050; ++ tlb_table[base + 35] = 0x50505050; ++ tlb_table[base + 36] = 0x50505050; ++ tlb_table[base + 37] = 0x50505050; ++ tlb_table[base + 38] = 0x50505050; ++ tlb_table[base + 39] = 0x50505050; ++ tlb_table[base + 40] = 0x50505050; ++ tlb_table[base + 41] = 0x50505050; ++ tlb_table[base + 42] = 0x50505050; ++ tlb_table[base + 43] = 0xFF505050; ++ } ++ //Table 5 ++ if (i == 5) { ++ tlb_table[base + 10] = 0x04060043; ++ tlb_table[base + 11] = 0x03040504; ++ tlb_table[base + 12] = 0x05040506; ++ tlb_table[base + 13] = 0x07060606; ++ tlb_table[base + 14] = 0x09090F09; ++ tlb_table[base + 15] = 0x12090808; ++ tlb_table[base 
+ 16] = 0x0F0A0D0D; ++ tlb_table[base + 17] = 0x16161315; ++ tlb_table[base + 18] = 0x14151315; ++ tlb_table[base + 19] = 0x1D221B18; ++ tlb_table[base + 20] = 0x19201918; ++ tlb_table[base + 21] = 0x281E1514; ++ tlb_table[base + 22] = 0x2423201E; ++ tlb_table[base + 23] = 0x17262726; ++ tlb_table[base + 24] = 0x2A2D2A1C; ++ tlb_table[base + 25] = 0x25222D25; ++ tlb_table[base + 26] = 0xDBFF2526; ++ tlb_table[base + 27] = 0x09014300; ++ tlb_table[base + 28] = 0x0B0D0A0A; ++ tlb_table[base + 29] = 0x0E0E1A0D; ++ tlb_table[base + 30] = 0x1F25371A; ++ tlb_table[base + 31] = 0x37373725; ++ tlb_table[base + 32] = 0x37373737; ++ tlb_table[base + 33] = 0x37373737; ++ tlb_table[base + 34] = 0x37373737; ++ tlb_table[base + 35] = 0x37373737; ++ tlb_table[base + 36] = 0x37373737; ++ tlb_table[base + 37] = 0x37373737; ++ tlb_table[base + 38] = 0x37373737; ++ tlb_table[base + 39] = 0x37373737; ++ tlb_table[base + 40] = 0x37373737; ++ tlb_table[base + 41] = 0x37373737; ++ tlb_table[base + 42] = 0x37373737; ++ tlb_table[base + 43] = 0xFF373737; ++ } ++ //Table 6 ++ if (i == 6) { ++ tlb_table[base + 10] = 0x02030043; ++ tlb_table[base + 11] = 0x01020202; ++ tlb_table[base + 12] = 0x02020203; ++ tlb_table[base + 13] = 0x03030303; ++ tlb_table[base + 14] = 0x04040704; ++ tlb_table[base + 15] = 0x09040404; ++ tlb_table[base + 16] = 0x07050606; ++ tlb_table[base + 17] = 0x0B0B090A; ++ tlb_table[base + 18] = 0x0A0A090A; ++ tlb_table[base + 19] = 0x0E110D0C; ++ tlb_table[base + 20] = 0x0C100C0C; ++ tlb_table[base + 21] = 0x140F0A0A; ++ tlb_table[base + 22] = 0x1211100F; ++ tlb_table[base + 23] = 0x0B131313; ++ tlb_table[base + 24] = 0x1516150E; ++ tlb_table[base + 25] = 0x12111612; ++ tlb_table[base + 26] = 0xDBFF1213; ++ tlb_table[base + 27] = 0x04014300; ++ tlb_table[base + 28] = 0x05060505; ++ tlb_table[base + 29] = 0x07070D06; ++ tlb_table[base + 30] = 0x0F121B0D; ++ tlb_table[base + 31] = 0x1B1B1B12; ++ tlb_table[base + 32] = 0x1B1B1B1B; ++ tlb_table[base + 33] = 0x1B1B1B1B; ++ tlb_table[base + 34] = 0x1B1B1B1B; ++ tlb_table[base + 35] = 0x1B1B1B1B; ++ tlb_table[base + 36] = 0x1B1B1B1B; ++ tlb_table[base + 37] = 0x1B1B1B1B; ++ tlb_table[base + 38] = 0x1B1B1B1B; ++ tlb_table[base + 39] = 0x1B1B1B1B; ++ tlb_table[base + 40] = 0x1B1B1B1B; ++ tlb_table[base + 41] = 0x1B1B1B1B; ++ tlb_table[base + 42] = 0x1B1B1B1B; ++ tlb_table[base + 43] = 0xFF1B1B1B; ++ } ++ //Table 7 ++ if (i == 7) { ++ tlb_table[base + 10] = 0x01020043; ++ tlb_table[base + 11] = 0x01010101; ++ tlb_table[base + 12] = 0x01010102; ++ tlb_table[base + 13] = 0x02020202; ++ tlb_table[base + 14] = 0x03030503; ++ tlb_table[base + 15] = 0x06030202; ++ tlb_table[base + 16] = 0x05030404; ++ tlb_table[base + 17] = 0x07070607; ++ tlb_table[base + 18] = 0x06070607; ++ tlb_table[base + 19] = 0x090B0908; ++ tlb_table[base + 20] = 0x080A0808; ++ tlb_table[base + 21] = 0x0D0A0706; ++ tlb_table[base + 22] = 0x0C0B0A0A; ++ tlb_table[base + 23] = 0x070C0D0C; ++ tlb_table[base + 24] = 0x0E0F0E09; ++ tlb_table[base + 25] = 0x0C0B0F0C; ++ tlb_table[base + 26] = 0xDBFF0C0C; ++ tlb_table[base + 27] = 0x03014300; ++ tlb_table[base + 28] = 0x03040303; ++ tlb_table[base + 29] = 0x04040804; ++ tlb_table[base + 30] = 0x0A0C1208; ++ tlb_table[base + 31] = 0x1212120C; ++ tlb_table[base + 32] = 0x12121212; ++ tlb_table[base + 33] = 0x12121212; ++ tlb_table[base + 34] = 0x12121212; ++ tlb_table[base + 35] = 0x12121212; ++ tlb_table[base + 36] = 0x12121212; ++ tlb_table[base + 37] = 0x12121212; ++ tlb_table[base + 38] = 0x12121212; ++ tlb_table[base + 39] = 0x12121212; ++ 
tlb_table[base + 40] = 0x12121212; ++ tlb_table[base + 41] = 0x12121212; ++ tlb_table[base + 42] = 0x12121212; ++ tlb_table[base + 43] = 0xFF121212; ++ } ++ //Table 8 ++ if (i == 8) { ++ tlb_table[base + 10] = 0x01020043; ++ tlb_table[base + 11] = 0x01010101; ++ tlb_table[base + 12] = 0x01010102; ++ tlb_table[base + 13] = 0x02020202; ++ tlb_table[base + 14] = 0x03030503; ++ tlb_table[base + 15] = 0x06030202; ++ tlb_table[base + 16] = 0x05030404; ++ tlb_table[base + 17] = 0x07070607; ++ tlb_table[base + 18] = 0x06070607; ++ tlb_table[base + 19] = 0x090B0908; ++ tlb_table[base + 20] = 0x080A0808; ++ tlb_table[base + 21] = 0x0D0A0706; ++ tlb_table[base + 22] = 0x0C0B0A0A; ++ tlb_table[base + 23] = 0x070C0D0C; ++ tlb_table[base + 24] = 0x0E0F0E09; ++ tlb_table[base + 25] = 0x0C0B0F0C; ++ tlb_table[base + 26] = 0xDBFF0C0C; ++ tlb_table[base + 27] = 0x02014300; ++ tlb_table[base + 28] = 0x03030202; ++ tlb_table[base + 29] = 0x04040703; ++ tlb_table[base + 30] = 0x080A0F07; ++ tlb_table[base + 31] = 0x0F0F0F0A; ++ tlb_table[base + 32] = 0x0F0F0F0F; ++ tlb_table[base + 33] = 0x0F0F0F0F; ++ tlb_table[base + 34] = 0x0F0F0F0F; ++ tlb_table[base + 35] = 0x0F0F0F0F; ++ tlb_table[base + 36] = 0x0F0F0F0F; ++ tlb_table[base + 37] = 0x0F0F0F0F; ++ tlb_table[base + 38] = 0x0F0F0F0F; ++ tlb_table[base + 39] = 0x0F0F0F0F; ++ tlb_table[base + 40] = 0x0F0F0F0F; ++ tlb_table[base + 41] = 0x0F0F0F0F; ++ tlb_table[base + 42] = 0x0F0F0F0F; ++ tlb_table[base + 43] = 0xFF0F0F0F; ++ } ++ //Table 9 ++ if (i == 9) { ++ tlb_table[base + 10] = 0x01010043; ++ tlb_table[base + 11] = 0x01010101; ++ tlb_table[base + 12] = 0x01010101; ++ tlb_table[base + 13] = 0x01010101; ++ tlb_table[base + 14] = 0x02020302; ++ tlb_table[base + 15] = 0x04020202; ++ tlb_table[base + 16] = 0x03020303; ++ tlb_table[base + 17] = 0x05050405; ++ tlb_table[base + 18] = 0x05050405; ++ tlb_table[base + 19] = 0x07080606; ++ tlb_table[base + 20] = 0x06080606; ++ tlb_table[base + 21] = 0x0A070505; ++ tlb_table[base + 22] = 0x09080807; ++ tlb_table[base + 23] = 0x05090909; ++ tlb_table[base + 24] = 0x0A0B0A07; ++ tlb_table[base + 25] = 0x09080B09; ++ tlb_table[base + 26] = 0xDBFF0909; ++ tlb_table[base + 27] = 0x02014300; ++ tlb_table[base + 28] = 0x02030202; ++ tlb_table[base + 29] = 0x03030503; ++ tlb_table[base + 30] = 0x07080C05; ++ tlb_table[base + 31] = 0x0C0C0C08; ++ tlb_table[base + 32] = 0x0C0C0C0C; ++ tlb_table[base + 33] = 0x0C0C0C0C; ++ tlb_table[base + 34] = 0x0C0C0C0C; ++ tlb_table[base + 35] = 0x0C0C0C0C; ++ tlb_table[base + 36] = 0x0C0C0C0C; ++ tlb_table[base + 37] = 0x0C0C0C0C; ++ tlb_table[base + 38] = 0x0C0C0C0C; ++ tlb_table[base + 39] = 0x0C0C0C0C; ++ tlb_table[base + 40] = 0x0C0C0C0C; ++ tlb_table[base + 41] = 0x0C0C0C0C; ++ tlb_table[base + 42] = 0x0C0C0C0C; ++ tlb_table[base + 43] = 0xFF0C0C0C; ++ } ++ //Table 10 ++ if (i == 10) { ++ tlb_table[base + 10] = 0x01010043; ++ tlb_table[base + 11] = 0x01010101; ++ tlb_table[base + 12] = 0x01010101; ++ tlb_table[base + 13] = 0x01010101; ++ tlb_table[base + 14] = 0x01010201; ++ tlb_table[base + 15] = 0x03010101; ++ tlb_table[base + 16] = 0x02010202; ++ tlb_table[base + 17] = 0x03030303; ++ tlb_table[base + 18] = 0x03030303; ++ tlb_table[base + 19] = 0x04050404; ++ tlb_table[base + 20] = 0x04050404; ++ tlb_table[base + 21] = 0x06050303; ++ tlb_table[base + 22] = 0x06050505; ++ tlb_table[base + 23] = 0x03060606; ++ tlb_table[base + 24] = 0x07070704; ++ tlb_table[base + 25] = 0x06050706; ++ tlb_table[base + 26] = 0xDBFF0606; ++ tlb_table[base + 27] = 0x01014300; ++ tlb_table[base + 28] = 
0x01020101; ++ tlb_table[base + 29] = 0x02020402; ++ tlb_table[base + 30] = 0x05060904; ++ tlb_table[base + 31] = 0x09090906; ++ tlb_table[base + 32] = 0x09090909; ++ tlb_table[base + 33] = 0x09090909; ++ tlb_table[base + 34] = 0x09090909; ++ tlb_table[base + 35] = 0x09090909; ++ tlb_table[base + 36] = 0x09090909; ++ tlb_table[base + 37] = 0x09090909; ++ tlb_table[base + 38] = 0x09090909; ++ tlb_table[base + 39] = 0x09090909; ++ tlb_table[base + 40] = 0x09090909; ++ tlb_table[base + 41] = 0x09090909; ++ tlb_table[base + 42] = 0x09090909; ++ tlb_table[base + 43] = 0xFF090909; ++ } ++ //Table 11 ++ if (i == 11) { ++ tlb_table[base + 10] = 0x01010043; ++ tlb_table[base + 11] = 0x01010101; ++ tlb_table[base + 12] = 0x01010101; ++ tlb_table[base + 13] = 0x01010101; ++ tlb_table[base + 14] = 0x01010101; ++ tlb_table[base + 15] = 0x01010101; ++ tlb_table[base + 16] = 0x01010101; ++ tlb_table[base + 17] = 0x01010101; ++ tlb_table[base + 18] = 0x01010101; ++ tlb_table[base + 19] = 0x02020202; ++ tlb_table[base + 20] = 0x02020202; ++ tlb_table[base + 21] = 0x03020101; ++ tlb_table[base + 22] = 0x03020202; ++ tlb_table[base + 23] = 0x01030303; ++ tlb_table[base + 24] = 0x03030302; ++ tlb_table[base + 25] = 0x03020303; ++ tlb_table[base + 26] = 0xDBFF0403; ++ tlb_table[base + 27] = 0x01014300; ++ tlb_table[base + 28] = 0x01010101; ++ tlb_table[base + 29] = 0x01010201; ++ tlb_table[base + 30] = 0x03040602; ++ tlb_table[base + 31] = 0x06060604; ++ tlb_table[base + 32] = 0x06060606; ++ tlb_table[base + 33] = 0x06060606; ++ tlb_table[base + 34] = 0x06060606; ++ tlb_table[base + 35] = 0x06060606; ++ tlb_table[base + 36] = 0x06060606; ++ tlb_table[base + 37] = 0x06060606; ++ tlb_table[base + 38] = 0x06060606; ++ tlb_table[base + 39] = 0x06060606; ++ tlb_table[base + 40] = 0x06060606; ++ tlb_table[base + 41] = 0x06060606; ++ tlb_table[base + 42] = 0x06060606; ++ tlb_table[base + 43] = 0xFF060606; ++ } ++ } ++} ++ ++static inline void ++video_write(struct AstRVAS *pAstRVAS, u32 val, u32 reg) ++{ ++ VIDEO_ENG_DBG("write offset: %x, val: %x\n", reg, val); ++ //Video is lock after reset, need always unlock ++ //unlock ++ writel(VIDEO_PROTECT_UNLOCK, pAstRVAS->video_reg_base); ++ writel(val, pAstRVAS->video_reg_base + reg); ++} ++ ++static inline u32 ++video_read(struct AstRVAS *pAstRVAS, u32 reg) ++{ ++ u32 val = readl(pAstRVAS->video_reg_base + reg); ++ ++ VIDEO_ENG_DBG("read offset: %x, val: %x\n", reg, val); ++ return val; ++} +diff --git a/drivers/soc/aspeed/rvas/video_engine.h b/drivers/soc/aspeed/rvas/video_engine.h +--- a/drivers/soc/aspeed/rvas/video_engine.h 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/soc/aspeed/rvas/video_engine.h 2025-12-23 10:16:21.127032619 +0000 +@@ -0,0 +1,270 @@ ++/* SPDX-License-Identifier: GPL-2.0+ */ ++/* ++ * File Name : video_engines.h ++ * Description : AST2600 video engines ++ * ++ * Copyright (C) 2019-2021 ASPEED Technology Inc. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. 
++ */ ++#ifndef __VIDEO_ENGINE_H__ ++#define __VIDEO_ENGINE_H__ ++ ++#include "video_ioctl.h" ++#include "hardware_engines.h" ++ ++#define VIDEO_STREAM_BUFFER_SIZE (0x400000) //4M ++#define VIDEO_CAPTURE_BUFFER_SIZE (0xA00000) //10M ++#define VIDEO_JPEG_TABLE_SIZE (0x100000) //1M ++ ++#define SCU_VIDEO_ENGINE_BIT BIT(6) ++#define SCU_VIDEO_CAPTURE_STOP_CLOCK_BIT BIT(3) ++#define SCU_VIDEO_ENGINE_STOP_CLOCK_BIT BIT(1) ++/***********************************************************************/ ++/* Register for VIDEO */ ++#define AST_VIDEO_PROTECT 0x000 /* protection key register */ ++#define AST_VIDEO_SEQ_CTRL 0x004 /* Video Sequence Control register */ ++#define AST_VIDEO_PASS_CTRL 0x008 /* Video Pass 1 Control register */ ++ ++//VR008[5]=1 ++#define AST_VIDEO_DIRECT_BASE 0x00C /* Video Direct Frame buffer mode control Register VR008[5]=1 */ ++#define AST_VIDEO_DIRECT_CTRL 0x010 /* Video Direct Frame buffer mode control Register VR008[5]=1 */ ++ ++//VR008[5]=0 ++#define AST_VIDEO_TIMING_H 0x00C /* Video Timing Generation Setting Register */ ++#define AST_VIDEO_TIMING_V 0x010 /* Video Timing Generation Setting Register */ ++#define AST_VIDEO_SCAL_FACTOR 0x014 /* Video Scaling Factor Register */ ++ ++#define AST_VIDEO_SCALING0 0x018 /* Video Scaling Filter Parameter Register #0 */ ++#define AST_VIDEO_SCALING1 0x01C /* Video Scaling Filter Parameter Register #1 */ ++#define AST_VIDEO_SCALING2 0x020 /* Video Scaling Filter Parameter Register #2 */ ++#define AST_VIDEO_SCALING3 0x024 /* Video Scaling Filter Parameter Register #3 */ ++ ++#define AST_VIDEO_BCD_CTRL 0x02C /* Video BCD Control Register */ ++#define AST_VIDEO_CAPTURE_WIN 0x030 /* Video Capturing Window Setting Register */ ++#define AST_VIDEO_COMPRESS_WIN 0x034 /* Video Compression Window Setting Register */ ++ ++#define AST_VIDEO_COMPRESS_PRO 0x038 /* Video Compression Stream Buffer Processing Offset Register */ ++#define AST_VIDEO_COMPRESS_READ 0x03C /* Video Compression Stream Buffer Read Offset Register */ ++ ++#define AST_VIDEO_JPEG_HEADER_BUFF 0x040 /* Video Based Address of JPEG Header Buffer Register */ ++#define AST_VIDEO_SOURCE_BUFF0 0x044 /* Video Based Address of Video Source Buffer #1 Register */ ++#define AST_VIDEO_SOURCE_SCAN_LINE 0x048 /* Video Scan Line Offset of Video Source Buffer Register */ ++#define AST_VIDEO_SOURCE_BUFF1 0x04C /* Video Based Address of Video Source Buffer #2 Register */ ++#define AST_VIDEO_BCD_BUFF 0x050 /* Video Base Address of BCD Flag Buffer Register */ ++#define AST_VIDEO_STREAM_BUFF 0x054 /* Video Base Address of Compressed Video Stream Buffer Register */ ++#define AST_VIDEO_STREAM_SIZE 0x058 /* Video Stream Buffer Size Register */ ++ ++#define AST_VIDEO_COMPRESS_CTRL 0x060 /* Video Compression Control Register */ ++ ++#define AST_VIDEO_COMPRESS_DATA_COUNT 0x070 /* Video Total Size of Compressed Video Stream Read Back Register */ ++#define AST_VIDEO_COMPRESS_BLOCK_COUNT 0x074 /* Video Total Number of Compressed Video Block Read Back Register */ ++#define AST_VIDEO_COMPRESS_FRAME_END 0x078 /* Video Frame-end offset of compressed video stream buffer read back Register */ ++#define AST_VIDEO_COMPRESS_FRAME_COUNT_RB 0x7C ++#define AST_VIDEO_JPEG_SIZE 0x084 ++ ++#define AST_VIDEO_CTRL 0x300 /* Video Control Register */ ++#define AST_VIDEO_INT_EN 0x304 /* Video interrupt Enable */ ++#define AST_VIDEO_INT_STS 0x308 /* Video interrupt status */ ++#define AST_VIDEO_MODE_DETECT 0x30C /* Video Mode Detection Parameter Register */ ++ ++#define AST_VIDEO_CRC1 0x320 /* Primary CRC Parameter Register */ 
++#define AST_VIDEO_CRC2 0x324 /* Second CRC Parameter Register */ ++#define AST_VIDEO_DATA_TRUNCA 0x328 /* Video Data Truncation Register */ ++ ++#define AST_VIDEO_E_SCRATCH_34C 0x34C /* Video Scratch Remap Read Back */ ++#define AST_VIDEO_E_SCRATCH_350 0x350 /* Video Scratch Remap Read Back */ ++#define AST_VIDEO_E_SCRATCH_354 0x354 /* Video Scratch Remap Read Back */ ++ ++//multi jpeg ++#define AST_VIDEO_ENCRYPT_SRAM 0x400 /* Video RC4/AES128 Encryption Key Register #0 ~ #63 */ ++#define AST_VIDEO_MULTI_JPEG_SRAM (AST_VIDEO_ENCRYPT_SRAM) /* Multi JPEG registers */ ++ ++#define REG_32_BIT_SZ_IN_BYTES (sizeof(u32)) ++ ++#define SET_FRAME_W_H(w, h) ((((u32)(h)) & 0x1fff) | ((((u32)(w)) & 0x1fff) << 13)) ++#define SET_FRAME_START_ADDR(addr) ((addr) & 0x7fffff80) ++ ++///////////////////////////////////////////////////////////////////////////// ++ ++/* AST_VIDEO_PROTECT: 0x000 - protection key register */ ++#define VIDEO_PROTECT_UNLOCK 0x1A038AA8 ++ ++/* AST_VIDEO_SEQ_CTRL 0x004 Video Sequence Control register */ ++#define VIDEO_HALT_ENG_STS BIT(21) ++#define VIDEO_COMPRESS_BUSY BIT(18) ++#define VIDEO_CAPTURE_BUSY BIT(16) ++#define VIDEO_HALT_ENG_TRIGGER BIT(12) ++#define VIDEO_COMPRESS_FORMAT_MASK BIT(10) ++#define VIDEO_GET_COMPRESS_FORMAT(x) (((x) >> 10) & 0x3) // 0 YUV444 ++#define VIDEO_COMPRESS_FORMAT(x) ((x) << 10) // 0 YUV444 ++#define YUV420 1 ++ ++#define G5_VIDEO_COMPRESS_JPEG_MODE BIT(13) ++#define VIDEO_YUV2RGB_DITHER_EN BIT(8) ++ ++#define VIDEO_COMPRESS_JPEG_MODE BIT(8) ++ ++//if bit 0 : 1 ++#define VIDEO_INPUT_MODE_CHG_WDT BIT(7) ++#define VIDEO_INSERT_FULL_COMPRESS BIT(6) ++#define VIDEO_AUTO_COMPRESS BIT(5) ++#define VIDEO_COMPRESS_TRIGGER BIT(4) ++#define VIDEO_CAPTURE_MULTI_FRAME BIT(3) ++#define VIDEO_COMPRESS_FORCE_IDLE BIT(2) ++#define VIDEO_CAPTURE_TRIGGER BIT(1) ++#define VIDEO_DETECT_TRIGGER BIT(0) ++ ++#define VIDEO_HALT_ENG_RB BIT(21) ++ ++#define VIDEO_ABCD_CHG_EN BIT(1) ++#define VIDEO_BCD_CHG_EN (1) ++ ++/* AST_VIDEO_PASS_CTRL 0x008 Video Pass1 Control register */ ++#define G6_VIDEO_MULTI_JPEG_FLAG_MODE BIT(31) ++#define G6_VIDEO_MULTI_JPEG_MODE BIT(30) ++#define G6_VIDEO_JPEG__COUNT(x) ((x) << 24) ++#define G6_VIDEO_FRAME_CT_MASK (0x3f << 24) ++//x * source frame rate / 60 ++#define VIDEO_FRAME_RATE_CTRL(x) ((x) << 16) ++#define VIDEO_HSYNC_POLARITY_CTRL BIT(15) ++#define VIDEO_INTERLANCE_MODE BIT(14) ++#define VIDEO_DUAL_EDGE_MODE BIT(13) //0 : Single edage ++#define VIDEO_18BIT_SINGLE_EDGE BIT(12) //0: 24bits ++#define VIDEO_DVO_INPUT_DELAY_MASK (7 << 9) ++#define VIDEO_DVO_INPUT_DELAY(x) ((x) << 9) //0 : no delay , 1: 1ns, 2: 2ns, 3:3ns, 4: inversed clock but no delay ++// if bit 5 : 0 ++#define VIDEO_HW_CURSOR_DIS BIT(8) ++// if bit 5 : 1 ++#define VIDEO_AUTO_FETCH BIT(8) // ++#define VIDEO_CAPTURE_FORMATE_MASK (3 << 6) ++ ++#define VIDEO_SET_CAPTURE_FORMAT(x) ((x) << 6) ++#define JPEG_MODE 1 ++#define RGB_MODE 2 ++#define GRAY_MODE 3 ++#define VIDEO_DIRECT_FETCH BIT(5) ++// if bit 5 : 0 ++#define VIDEO_INTERNAL_DE BIT(4) ++#define VIDEO_EXT_ADC_ATTRIBUTE BIT(3) ++ ++/* AST_VIDEO_DIRECT_CTRL 0x010 Video Direct Frame buffer mode control Register VR008[5]=1 */ ++#define VIDEO_FETCH_TIMING(x) ((x) << 16) ++#define VIDEO_FETCH_LINE_OFFSET(x) ((x) & 0xffff) ++ ++/* AST_VIDEO_CAPTURE_WIN 0x030 Video Capturing Window Setting Register */ ++#define VIDEO_CAPTURE_V(x) ((x) & 0x7ff) ++#define VIDEO_CAPTURE_H(x) (((x) & 0x7ff) << 16) ++ ++/* AST_VIDEO_COMPRESS_WIN 0x034 Video Compression Window Setting Register */ ++#define VIDEO_COMPRESS_V(x) ((x) & 0x7ff) ++#define 
VIDEO_GET_COMPRESS_V(x) ((x) & 0x7ff) ++#define VIDEO_COMPRESS_H(x) (((x) & 0x7ff) << 16) ++#define VIDEO_GET_COMPRESS_H(x) (((x) >> 16) & 0x7ff) ++ ++/* AST_VIDEO_STREAM_SIZE 0x058 Video Stream Buffer Size Register */ ++#define VIDEO_STREAM_PKT_N(x) ((x) << 3) ++#define STREAM_4_PKTS 0 ++#define STREAM_8_PKTS 1 ++#define STREAM_16_PKTS 2 ++#define STREAM_32_PKTS 3 ++#define STREAM_64_PKTS 4 ++#define STREAM_128_PKTS 5 ++ ++#define VIDEO_STREAM_PKT_SIZE(x) (x) ++#define STREAM_1KB 0 ++#define STREAM_2KB 1 ++#define STREAM_4KB 2 ++#define STREAM_8KB 3 ++#define STREAM_16KB 4 ++#define STREAM_32KB 5 ++#define STREAM_64KB 6 ++#define STREAM_128KB 7 ++ ++/* AST_VIDEO_COMPRESS_CTRL 0x060 Video Compression Control Register */ ++#define VIDEO_DCT_CQT_SELECTION (0xf << 6) // bit 6-9, bit 10 for which quantization is referred ++#define VIDEO_DCT_HQ_CQT_SELECTION (0xf << 27) // bit 27-30, bit 31 for which quantization is referred ++ ++#define VIDEO_HQ_DCT_LUM(x) ((x) << 27) ++#define VIDEO_GET_HQ_DCT_LUM(x) (((x) >> 27) & 0x1f) ++#define VIDEO_HQ_DCT_CHROM(x) ((x) << 22) ++#define VIDEO_GET_HQ_DCT_CHROM(x) (((x) >> 22) & 0x1f) ++#define VIDEO_HQ_DCT_MASK (0x3ff << 22) ++#define VIDEO_DCT_HUFFMAN_ENCODE(x) ((x) << 20) ++#define VIDEO_DCT_RESET BIT(17) ++#define VIDEO_HQ_ENABLE BIT(16) ++#define VIDEO_GET_HQ_ENABLE(x) (((x) >> 16) & 0x1) ++#define VIDEO_DCT_LUM(x) ((x) << 11) ++#define VIDEO_GET_DCT_LUM(x) (((x) >> 11) & 0x1f) ++#define VIDEO_DCT_CHROM(x) ((x) << 6) ++#define VIDEO_GET_DCT_CHROM(x) (((x) >> 6) & 0x1f) ++#define VIDEO_DCT_MASK (0x3ff << 6) ++#define VIDEO_ENCRYP_ENABLE BIT(5) ++#define VIDEO_COMPRESS_QUANTIZ_MODE BIT(2) ++#define VIDEO_4COLOR_VQ_ENCODE BIT(1) ++#define VIDEO_DCT_ONLY_ENCODE (1) ++#define VIDEO_DCT_VQ_MASK (0x3) ++ ++#define VIDEO_CTRL_RC4_TEST_MODE BIT(9) ++#define VIDEO_CTRL_RC4_RST BIT(8) ++ ++#define VIDEO_CTRL_ADDRESS_MAP_MULTI_JPEG (0x3 << 30) ++ ++#define VIDEO_CTRL_DWN_SCALING_MASK (0x3 << 4) ++#define VIDEO_CTRL_DWN_SCALING_ENABLE_LINE_BUFFER BIT(4) ++ ++/* AST_VIDEO_INT_EN 0x304 Video interrupt Enable */ ++/* AST_VIDEO_INT_STS 0x308 Video interrupt status */ ++#define VM_COMPRESS_COMPLETE BIT(17) ++#define VM_CAPTURE_COMPLETE BIT(16) ++ ++#define VIDEO_FRAME_COMPLETE BIT(5) ++#define VIDEO_MODE_DETECT_RDY BIT(4) ++#define VIDEO_COMPRESS_COMPLETE BIT(3) ++#define VIDEO_COMPRESS_PKT_COMPLETE BIT(2) ++#define VIDEO_CAPTURE_COMPLETE BIT(1) ++#define VIDEO_MODE_DETECT_WDT BIT(0) ++ ++/***********************************************************************/ ++struct ast_capture_mode { ++ u8 engine_idx; //set 0: engine 0, engine 1 ++ u8 differential; //set 0: full, 1:diff frame ++ u8 mode_change; //get 0: no, 1:change ++}; ++ ++struct ast_compression_mode { ++ u8 engine_idx; //set 0: engine 0, engine 1 ++ u8 mode_change; //get 0: no, 1:change ++ u32 total_size; //get ++ u32 block_count; //get ++}; ++ ++/***********************************************************************/ ++struct INTERNAL_MODE { ++ u16 HorizontalActive; ++ u16 VerticalActive; ++ u16 RefreshRateIndex; ++ u32 PixelClock; ++}; ++ ++// ioctl functions ++void ioctl_get_video_engine_config(struct VideoConfig *pVideoConfig, struct AstRVAS *pAstRVAS); ++void ioctl_set_video_engine_config(struct VideoConfig *pVideoConfig, struct AstRVAS *pAstRVAS); ++void ioctl_get_video_engine_data(struct MultiJpegConfig *pArrayMJConfig, struct AstRVAS *pAstRVAS, phys_addr_t dwPhyStreamAddress); ++void ioctl_get_video_engine_data_2700(struct MultiJpegConfig *pArrayMJConfig, struct AstRVAS *pAstRVAS, dma_addr_t 
dwPhyStreamAddress); ++ ++//local functions ++irqreturn_t ast_video_isr(int this_irq, void *dev_id); ++int video_engine_reserveMem(struct AstRVAS *pAstRVAS); ++void enable_video_interrupt(struct AstRVAS *pAstRVAS); ++void disable_video_interrupt(struct AstRVAS *pAstRVAS); ++void video_set_Window(struct AstRVAS *pAstRVAS); ++int free_video_engine_memory(struct AstRVAS *pAstRVAS); ++void video_ctrl_init(struct AstRVAS *pAstRVAS); ++void video_engine_rc4Reset(struct AstRVAS *pAstRVAS); ++void set_direct_mode(struct AstRVAS *pAstRVAS); ++ ++#endif // __VIDEO_ENGINE_H__ +diff --git a/drivers/soc/aspeed/rvas/video_ioctl.h b/drivers/soc/aspeed/rvas/video_ioctl.h +--- a/drivers/soc/aspeed/rvas/video_ioctl.h 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/soc/aspeed/rvas/video_ioctl.h 2025-12-23 10:16:21.128032602 +0000 +@@ -0,0 +1,275 @@ ++/* SPDX-License-Identifier: GPL-2.0+ */ ++/* ++ * This file is part of the ASPEED Linux Device Driver for ASPEED Baseboard Management Controller. ++ * Refer to the README file included with this package for driver version and adapter compatibility. ++ * ++ * Copyright (C) 2019-2021 ASPEED Technology Inc. All rights reserved. ++ * ++ */ ++ ++#ifndef _VIDEO_IOCTL_H ++#define _VIDEO_IOCTL_H ++ ++#include ++ ++#define RVAS_MAGIC ('b') ++#define CMD_IOCTL_TURN_LOCAL_MONITOR_ON _IOR(RVAS_MAGIC, IOCTL_TURN_LOCAL_MONITOR_ON, struct RvasIoctl) ++#define CMD_IOCTL_TURN_LOCAL_MONITOR_OFF _IOR(RVAS_MAGIC, IOCTL_TURN_LOCAL_MONITOR_OFF, struct RvasIoctl) ++#define CMD_IOCTL_IS_LOCAL_MONITOR_ENABLED _IOR(RVAS_MAGIC, IOCTL_IS_LOCAL_MONITOR_ENABLED, struct RvasIoctl) ++#define CMD_IOCTL_GET_VIDEO_GEOMETRY _IOWR(RVAS_MAGIC, IOCTL_GET_VIDEO_GEOMETRY, struct RvasIoctl) ++#define CMD_IOCTL_WAIT_FOR_VIDEO_EVENT _IOWR(RVAS_MAGIC, IOCTL_WAIT_FOR_VIDEO_EVENT, struct RvasIoctl) ++#define CMD_IOCTL_GET_GRC_REGIESTERS _IOWR(RVAS_MAGIC, IOCTL_GET_GRC_REGIESTERS, struct RvasIoctl) ++#define CMD_IOCTL_READ_SNOOP_MAP _IOWR(RVAS_MAGIC, IOCTL_READ_SNOOP_MAP, struct RvasIoctl) ++#define CMD_IOCTL_READ_SNOOP_AGGREGATE _IOWR(RVAS_MAGIC, IOCTL_READ_SNOOP_AGGREGATE, struct RvasIoctl) ++#define CMD_IOCTL_FETCH_VIDEO_TILES _IOWR(RVAS_MAGIC, IOCTL_FETCH_VIDEO_TILES, struct RvasIoctl) ++#define CMD_IOCTL_FETCH_VIDEO_SLICES _IOWR(RVAS_MAGIC, IOCTL_FETCH_VIDEO_SLICES, struct RvasIoctl) ++#define CMD_IOCTL_RUN_LENGTH_ENCODE_DATA _IOWR(RVAS_MAGIC, IOCTL_RUN_LENGTH_ENCODE_DATA, struct RvasIoctl) ++#define CMD_IOCTL_FETCH_TEXT_DATA _IOWR(RVAS_MAGIC, IOCTL_FETCH_TEXT_DATA, struct RvasIoctl) ++#define CMD_IOCTL_FETCH_MODE13_DATA _IOWR(RVAS_MAGIC, IOCTL_FETCH_MODE13_DATA, struct RvasIoctl) ++#define CMD_IOCTL_NEW_CONTEXT _IOWR(RVAS_MAGIC, IOCTL_NEW_CONTEXT, struct RvasIoctl) ++#define CMD_IOCTL_DEL_CONTEXT _IOWR(RVAS_MAGIC, IOCTL_DEL_CONTEXT, struct RvasIoctl) ++#define CMD_IOCTL_ALLOC _IOWR(RVAS_MAGIC, IOCTL_ALLOC, struct RvasIoctl) ++#define CMD_IOCTL_FREE _IOWR(RVAS_MAGIC, IOCTL_FREE, struct RvasIoctl) ++#define CMD_IOCTL_SET_TSE_COUNTER _IOWR(RVAS_MAGIC, IOCTL_SET_TSE_COUNTER, struct RvasIoctl) ++#define CMD_IOCTL_GET_TSE_COUNTER _IOWR(RVAS_MAGIC, IOCTL_GET_TSE_COUNTER, struct RvasIoctl) ++#define CMD_IOCTL_VIDEO_ENGINE_RESET _IOWR(RVAS_MAGIC, IOCTL_VIDEO_ENGINE_RESET, struct RvasIoctl) ++//jpeg ++#define CMD_IOCTL_SET_VIDEO_ENGINE_CONFIG _IOW(RVAS_MAGIC, IOCTL_SET_VIDEO_ENGINE_CONFIG, struct VideoConfig*) ++#define CMD_IOCTL_GET_VIDEO_ENGINE_CONFIG _IOW(RVAS_MAGIC, IOCTL_GET_VIDEO_ENGINE_CONFIG, struct VideoConfig*) ++#define CMD_IOCTL_GET_VIDEO_ENGINE_DATA _IOWR(RVAS_MAGIC, IOCTL_GET_VIDEO_ENGINE_DATA, 
struct MultiJpegConfig*) ++ ++enum HARD_WARE_ENGINE_IOCTL { ++ IOCTL_TURN_LOCAL_MONITOR_ON = 20, //REMOTE VIDEO GENERAL IOCTL ++ IOCTL_TURN_LOCAL_MONITOR_OFF, ++ IOCTL_IS_LOCAL_MONITOR_ENABLED, ++ ++ IOCTL_GET_VIDEO_GEOMETRY = 40, // REMOTE VIDEO ++ IOCTL_WAIT_FOR_VIDEO_EVENT, ++ IOCTL_GET_GRC_REGIESTERS, ++ IOCTL_READ_SNOOP_MAP, ++ IOCTL_READ_SNOOP_AGGREGATE, ++ IOCTL_FETCH_VIDEO_TILES, ++ IOCTL_FETCH_VIDEO_SLICES, ++ IOCTL_RUN_LENGTH_ENCODE_DATA, ++ IOCTL_FETCH_TEXT_DATA, ++ IOCTL_FETCH_MODE13_DATA, ++ IOCTL_NEW_CONTEXT, ++ IOCTL_DEL_CONTEXT, ++ IOCTL_ALLOC, ++ IOCTL_FREE, ++ IOCTL_SET_TSE_COUNTER, ++ IOCTL_GET_TSE_COUNTER, ++ IOCTL_VIDEO_ENGINE_RESET, ++ IOCTL_SET_VIDEO_ENGINE_CONFIG, ++ IOCTL_GET_VIDEO_ENGINE_CONFIG, ++ IOCTL_GET_VIDEO_ENGINE_DATA, ++}; ++ ++enum GraphicsModeType { ++ InvalidMode = 0, TextMode = 1, VGAGraphicsMode = 2, AGAGraphicsMode = 3 ++}; ++ ++enum RVASStatus { ++ SuccessStatus = 0, ++ GenericError = 1, ++ MemoryAllocError = 2, ++ InvalidMemoryHandle = 3, ++ CannotMapMemory = 4, ++ CannotUnMapMemory = 5, ++ TimedOut = 6, ++ InvalidContextHandle = 7, ++ CaptureTimedOut = 8, ++ CompressionTimedOut = 9, ++ HostSuspended ++}; ++ ++enum SelectedByteMode { ++ AllBytesMode = 0, ++ SkipMode = 1, ++ PlanarToPackedMode, ++ PackedToPackedMode, ++ LowByteMode, ++ MiddleByteMode, ++ TopByteMode ++}; ++ ++enum DataProccessMode { ++ NormalTileMode = 0, ++ FourBitPlanarMode = 1, ++ FourBitPackedMode = 2, ++ AttrMode = 3, ++ AsciiOnlyMode = 4, ++ FontFetchMode = 5, ++ SplitByteMode = 6 ++}; ++ ++enum ResetEngineMode { ++ ResetAll = 0, ++ ResetRvasEngine = 1, ++ ResetVeEngine = 2 ++}; ++ ++struct VideoGeometry { ++ u16 wScreenWidth; ++ u16 wScreenHeight; ++ u16 wStride; ++ u8 byBitsPerPixel; ++ u8 byModeID; ++ enum GraphicsModeType gmt; ++}; ++ ++struct EventMap { ++ u32 bPaletteChanged :1; ++ u32 bATTRChanged :1; ++ u32 bSEQChanged :1; ++ u32 bGCTLChanged :1; ++ u32 bCRTCChanged :1; ++ u32 bCRTCEXTChanged :1; ++ u32 bPLTRAMChanged :1; ++ u32 bXCURCOLChanged :1; ++ u32 bXCURCTLChanged :1; ++ u32 bXCURPOSChanged :1; ++ u32 bDoorbellA :1; ++ u32 bDoorbellB :1; ++ u32 bGeometryChanged :1; ++ u32 bSnoopChanged :1; ++ u32 bTextFontChanged :1; ++ u32 bTextATTRChanged :1; ++ u32 bTextASCIIChanged :1; ++}; ++ ++struct FetchMap { ++ //in parameters ++ bool bEnableRLE; ++ u8 bTextAlignDouble; // 0 - 8 byte, 1 - 16 byte ++ u8 byRLETripletCode; ++ u8 byRLERepeatCode; ++ enum DataProccessMode dpm; ++ //out parameters ++ u32 dwFetchSize; ++ u32 dwFetchRLESize; ++ u32 dwCheckSum; ++ bool bRLEFailed; ++ u8 rsvd[3]; ++}; ++ ++struct SnoopAggregate { ++ u64 qwRow; ++ u64 qwCol; ++}; ++ ++struct FetchRegion { ++ u16 wTopY; ++ u16 wLeftX; ++ u16 wBottomY; ++ u16 wRightX; ++}; ++ ++struct FetchOperation { ++ struct FetchRegion fr; ++ enum SelectedByteMode sbm; ++ u32 dwFetchSize; ++ u32 dwFetchRLESize; ++ u32 dwCheckSum; ++ bool bRLEFailed; ++ bool bEnableRLE; ++ u8 byRLETripletCode; ++ u8 byRLERepeatCode; ++ u8 byVGATextAlignment; //0-8bytes, 1-16bytes. 
++ u8 rsvd[3]; ++}; ++ ++struct FetchVideoTilesArg { ++ struct VideoGeometry vg; ++ u32 dwTotalOutputSize; ++ u32 cfo; ++ struct FetchOperation pfo[4]; ++}; ++ ++struct FetchVideoSlicesArg { ++ struct VideoGeometry vg; ++ u32 dwSlicedSize; ++ u32 dwSlicedRLESize; ++ u32 dwCheckSum; ++ bool bEnableRLE; ++ bool bRLEFailed; ++ u8 byRLETripletCode; ++ u8 byRLERepeatCode; ++ u8 cBuckets; ++ u8 rsvd[3]; ++ u8 abyBitIndexes[24]; ++ u32 cfr; ++ struct FetchRegion pfr[4]; ++}; ++ ++struct RVASBuffer { ++ void *pv; ++ size_t cb; ++}; ++ ++struct RvasIoctl { ++ enum RVASStatus rs; ++ void *rc; ++ struct RVASBuffer rvb; ++ void *rmh; ++ void *rmh1; ++ void *rmh2; ++ u32 rmh_mem_size; ++ u32 rmh1_mem_size; ++ u32 rmh2_mem_size; ++ struct VideoGeometry vg; ++ struct EventMap em; ++ struct SnoopAggregate sa; ++ union { ++ u32 tse_counter; ++ u32 req_mem_size; ++ u32 encode; ++ u32 time_out; ++ }; ++ u32 rle_len; // RLE Length ++ u32 rle_checksum; ++ struct FetchMap tfm; ++ u8 flag; ++ u8 lms; ++ u8 resetMode; ++ u8 rsvd; ++}; ++ ++// ++// Video Engine ++// ++ ++#define MAX_MULTI_FRAME_CT (32) ++ ++struct VideoConfig { ++ u8 engine; //0: engine 0 - normal engine, engine 1 - VM legacy engine ++ u8 compression_mode; //0:DCT, 1:DCT_VQ mix VQ-2 color, 2:DCT_VQ mix VQ-4 color 9: ++ u8 compression_format; //0:ASPEED 1:JPEG ++ u8 capture_format; //0:CCIR601-2 YUV, 1:JPEG YUV, 2:RGB for ASPEED mode only, 3:Gray ++ u8 rc4_enable; //0:disable 1:enable ++ u8 YUV420_mode; //0:YUV444, 1:YUV420 ++ u8 Visual_Lossless; ++ u8 Y_JPEGTableSelector; ++ u8 AdvanceTableSelector; ++ u8 AutoMode; ++ u8 rsvd[2]; ++ enum RVASStatus rs; ++}; ++ ++struct MultiJpegFrame { ++ u32 dwSizeInBytes; // Image size in bytes ++ u32 dwOffsetInBytes; // Offset in bytes ++ u16 wXPixels; // In: X coordinate ++ u16 wYPixels; // In: Y coordinate ++ u16 wWidthPixels; // In: Width for Fetch ++ u16 wHeightPixels; // In: Height for Fetch ++}; ++ ++struct MultiJpegConfig { ++ unsigned char multi_jpeg_frames; // frame count ++ struct MultiJpegFrame frame[MAX_MULTI_FRAME_CT]; // The Multi Frames ++ void *aStreamHandle; ++ enum RVASStatus rs; ++}; ++ ++#endif // _VIDEO_IOCTL_H +diff --git a/drivers/soc/aspeed/rvas/video_main.c b/drivers/soc/aspeed/rvas/video_main.c +--- a/drivers/soc/aspeed/rvas/video_main.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/soc/aspeed/rvas/video_main.c 2025-12-23 10:16:21.128032602 +0000 +@@ -0,0 +1,1848 @@ ++// SPDX-License-Identifier: GPL-2.0+ ++/* ++ * File Name : video_main.c ++ * Description : AST2600 RVAS hardware engines ++ * ++ * Copyright (C) ASPEED Technology Inc. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. 
++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "video_ioctl.h" ++#include "hardware_engines.h" ++#include "video.h" ++#include "video_debug.h" ++#include "video_engine.h" ++ ++#define TEST_GRCE_DETECT_RESOLUTION_CHG ++ ++static long video_ioctl(struct file *file, unsigned int cmd, unsigned long arg); ++static int video_open(struct inode *inode, struct file *file); ++static int video_release(struct inode *inode, struct file *file); ++static irqreturn_t fge_handler(int irq, void *dev_id); ++static void video_off(struct AstRVAS *pAstRVAS); ++static void video_on(struct AstRVAS *pAstRVAS); ++ ++static void video_os_init_sleep_struct(struct Video_OsSleepStruct *Sleep); ++static void video_ss_wakeup_on_timeout(struct Video_OsSleepStruct *Sleep); ++static void enable_rvas_engines(struct AstRVAS *pAstRVAS); ++static void video_engine_init(struct AstRVAS *pAstRVAS); ++static void rvas_init(struct AstRVAS *pAstRVAS); ++static void reset_rvas_engine(struct AstRVAS *pAstRVAS); ++static void reset_video_engine(struct AstRVAS *pAstRVAS); ++static void set_FBInfo_size(struct AstRVAS *pAstRVAS, void __iomem *mcr_base); ++ ++static long video_os_sleep_on_timeout(struct Video_OsSleepStruct *Sleep, u8 *Var, long msecs); ++ ++static struct AstRVAS *file_ast_rvas(struct file *file) ++{ ++ return container_of(file->private_data, struct AstRVAS, rvas_dev); ++} ++ ++static long video_ioctl(struct file *file, unsigned int cmd, unsigned long arg) ++{ ++ int iResult = 0; ++ struct RvasIoctl ri; ++ struct VideoConfig video_config; ++ struct MultiJpegConfig multi_jpeg; ++ u8 bVideoCmd = 0; ++ dma_addr_t dw_phys = 0; ++ struct AstRVAS *pAstRVAS = file_ast_rvas(file); ++ ++ VIDEO_DBG("Start\n"); ++ VIDEO_DBG("pAstRVAS: 0x%p\n", pAstRVAS); ++ memset(&ri, 0, sizeof(ri)); ++ ++ if (cmd != CMD_IOCTL_SET_VIDEO_ENGINE_CONFIG && ++ cmd != CMD_IOCTL_GET_VIDEO_ENGINE_CONFIG && ++ cmd != CMD_IOCTL_GET_VIDEO_ENGINE_DATA) { ++ if (raw_copy_from_user(&ri, (void *)arg, sizeof(struct RvasIoctl))) { ++ dev_err(pAstRVAS->pdev, "Copy from user buffer Failed\n"); ++ return -EINVAL; ++ } ++ ++ ri.rs = SuccessStatus; ++ bVideoCmd = 0; ++ } else { ++ bVideoCmd = 1; ++ } ++ ++ VIDEO_DBG(" Command = 0x%x\n", cmd); ++ ++ switch (cmd) { ++ case CMD_IOCTL_TURN_LOCAL_MONITOR_ON: ++ if (pAstRVAS->config->version == 7) ++ ioctl_update_lms_2700(0x1, pAstRVAS); ++ else ++ ioctl_update_lms(0x1, pAstRVAS); ++ break; ++ ++ case CMD_IOCTL_TURN_LOCAL_MONITOR_OFF: ++ if (pAstRVAS->config->version == 7) ++ ioctl_update_lms_2700(0x0, pAstRVAS); ++ else ++ ioctl_update_lms(0x0, pAstRVAS); ++ break; ++ ++ case CMD_IOCTL_IS_LOCAL_MONITOR_ENABLED: ++ u32 status; ++ ++ if (pAstRVAS->config->version == 7) ++ status = ioctl_get_lm_status_2700(pAstRVAS); ++ else ++ status = ioctl_get_lm_status(pAstRVAS); ++ ++ if (status) ++ ri.lms = 0x1; ++ else ++ ri.lms = 0x0; ++ break; ++ ++ case CMD_IOCTL_GET_VIDEO_GEOMETRY: ++ VIDEO_DBG(" Command CMD_IOCTL_GET_VIDEO_GEOMETRY\n"); ++ ioctl_get_video_geometry(&ri, pAstRVAS); ++ break; ++ ++ case CMD_IOCTL_WAIT_FOR_VIDEO_EVENT: ++ VIDEO_DBG(" Command CMD_IOCTL_WAIT_FOR_VIDEO_EVENT\n"); ++ ioctl_wait_for_video_event(&ri, pAstRVAS); ++ break; ++ ++ case CMD_IOCTL_GET_GRC_REGIESTERS: ++ VIDEO_DBG(" Command CMD_IOCTL_GET_GRC_REGIESTERS\n"); ++ ioctl_get_grc_register(&ri, pAstRVAS); ++ break; ++ ++ case 
CMD_IOCTL_READ_SNOOP_MAP: ++ VIDEO_DBG(" Command CMD_IOCTL_READ_SNOOP_MAP\n"); ++ ioctl_read_snoop_map(&ri, pAstRVAS); ++ break; ++ ++ case CMD_IOCTL_READ_SNOOP_AGGREGATE: ++ VIDEO_DBG(" Command CMD_IOCTL_READ_SNOOP_AGGREGATE\n"); ++ ioctl_read_snoop_aggregate(&ri, pAstRVAS); ++ break; ++ ++ case CMD_IOCTL_FETCH_VIDEO_TILES: /// ++ VIDEO_DBG("CMD_IOCTL_FETCH_VIDEO_TILES\n"); ++ ioctl_fetch_video_tiles(&ri, pAstRVAS); ++ break; ++ ++ case CMD_IOCTL_FETCH_VIDEO_SLICES: ++ VIDEO_DBG(" Command CMD_IOCTL_FETCH_VIDEO_SLICES\n"); ++ ioctl_fetch_video_slices(&ri, pAstRVAS); ++ break; ++ ++ case CMD_IOCTL_RUN_LENGTH_ENCODE_DATA: ++ VIDEO_DBG(" Command CMD_IOCTL_RUN_LENGTH_ENCODE_DATA\n"); ++ ioctl_run_length_encode_data(&ri, pAstRVAS); ++ break; ++ ++ case CMD_IOCTL_FETCH_TEXT_DATA: ++ VIDEO_DBG(" Command CMD_IOCTL_FETCH_TEXT_DATA\n"); ++ ioctl_fetch_text_data(&ri, pAstRVAS); ++ break; ++ ++ case CMD_IOCTL_FETCH_MODE13_DATA: ++ VIDEO_DBG(" Command CMD_IOCTL_FETCH_MODE13_DATA\n"); ++ ioctl_fetch_mode_13_data(&ri, pAstRVAS); ++ break; ++ ++ case CMD_IOCTL_ALLOC: ++ VIDEO_DBG(" Command CMD_IOCTL_ALLOC\n"); ++ ioctl_alloc(file, &ri, pAstRVAS); ++ break; ++ ++ case CMD_IOCTL_FREE: ++ VIDEO_DBG(" Command CMD_IOCTL_FREE\n"); ++ ioctl_free(&ri, pAstRVAS); ++ break; ++ ++ case CMD_IOCTL_NEW_CONTEXT: ++ VIDEO_DBG(" Command CMD_IOCTL_NEW_CONTEXT\n"); ++ ioctl_new_context(file, &ri, pAstRVAS); ++ break; ++ ++ case CMD_IOCTL_DEL_CONTEXT: ++ VIDEO_DBG(" Command CMD_IOCTL_DEL_CONTEXT\n"); ++ ioctl_delete_context(&ri, pAstRVAS); ++ break; ++ ++ case CMD_IOCTL_SET_TSE_COUNTER: ++ VIDEO_DBG(" Command CMD_IOCTL_SET_TSE_COUNTER\n"); ++ ioctl_set_tse_tsicr(&ri, pAstRVAS); ++ break; ++ ++ case CMD_IOCTL_GET_TSE_COUNTER: ++ VIDEO_DBG(" Command CMD_IOCTL_GET_TSE_COUNTER\n"); ++ ioctl_get_tse_tsicr(&ri, pAstRVAS); ++ break; ++ ++ case CMD_IOCTL_VIDEO_ENGINE_RESET: ++ VIDEO_ENG_DBG(" Command CMD_IOCTL_VIDEO_ENGINE_RESET\n"); ++ ioctl_reset_video_engine(&ri, pAstRVAS); ++ break; ++ case CMD_IOCTL_GET_VIDEO_ENGINE_CONFIG: ++ VIDEO_DBG(" Command CMD_IOCTL_GET_VIDEO_ENGINE_CONFIG\n"); ++ ioctl_get_video_engine_config(&video_config, pAstRVAS); ++ ++ iResult = raw_copy_to_user((void *)arg, &video_config, sizeof(video_config)); ++ break; ++ case CMD_IOCTL_SET_VIDEO_ENGINE_CONFIG: ++ VIDEO_DBG(" Command CMD_IOCTL_SET_VIDEO_ENGINE_CONFIG\n"); ++ iResult = raw_copy_from_user(&video_config, (void *)arg, sizeof(video_config)); ++ ++ ioctl_set_video_engine_config(&video_config, pAstRVAS); ++ break; ++ case CMD_IOCTL_GET_VIDEO_ENGINE_DATA: ++ VIDEO_DBG(" Command CMD_IOCTL_GET_VIDEO_ENGINE_DATA\n"); ++ iResult = raw_copy_from_user(&multi_jpeg, (void *)arg, sizeof(multi_jpeg)); ++ dw_phys = get_phys_add_rsvd_mem((u32)multi_jpeg.aStreamHandle, pAstRVAS); ++ VIDEO_DBG("physical stream address: %#llx\n", dw_phys); ++ ++ if (dw_phys == 0) { ++ dev_err(pAstRVAS->pdev, "Error of getting stream buffer address\n"); ++ } else { ++ if (pAstRVAS->config->version == 7) ++ ioctl_get_video_engine_data_2700(&multi_jpeg, pAstRVAS, dw_phys); ++ else ++ ioctl_get_video_engine_data(&multi_jpeg, pAstRVAS, dw_phys); ++ } ++ ++ iResult = raw_copy_to_user((void *)arg, &multi_jpeg, sizeof(multi_jpeg)); ++ break; ++ default: ++ dev_err(pAstRVAS->pdev, "Unknown Ioctl: %#x\n", cmd); ++ iResult = -EINVAL; ++ break; ++ } ++ ++ if (!iResult && !bVideoCmd) ++ if (raw_copy_to_user((void *)arg, &ri, sizeof(struct RvasIoctl))) { ++ dev_err(pAstRVAS->pdev, "Copy to user buffer Failed\n"); ++ iResult = -EINVAL; ++ } ++ ++ return iResult; ++} ++ ++phys_addr_t 
get_phy_fb_start_address(struct AstRVAS *pAstRVAS) ++{ ++ u32 dw_offset = get_screen_offset(pAstRVAS); ++ ++ pAstRVAS->FBInfo.qwFBPhysStart = (pAstRVAS->config->version == 7) ++ ? DDR_BASE_27 ++ : DDR_BASE; ++ pAstRVAS->FBInfo.qwFBPhysStart += pAstRVAS->FBInfo.dwDRAMSize - pAstRVAS->FBInfo.dwVGASize + dw_offset; ++ if (pAstRVAS->rvas_index == 1) ++ pAstRVAS->FBInfo.qwFBPhysStart -= pAstRVAS->FBInfo.dwVGASize; ++ ++ HW_ENG_DBG("Frame buffer start address: %#x, dram size: %#x, vga size: %#x\n", ++ pAstRVAS->FBInfo.qwFBPhysStart, ++ pAstRVAS->FBInfo.dwDRAMSize, ++ pAstRVAS->FBInfo.dwVGASize); ++ ++ return pAstRVAS->FBInfo.qwFBPhysStart; ++} ++ ++static int video_mmap(struct file *file, struct vm_area_struct *vma) ++{ ++ size_t size; ++ u32 dw_index; ++ u8 found = 0; ++ struct AstRVAS *pAstRVAS = file_ast_rvas(file); ++ ++ struct MemoryMapTable **pmmt = pAstRVAS->ppmmtMemoryTable; ++ ++ size = vma->vm_end - vma->vm_start; ++ vma->vm_private_data = pAstRVAS; ++ VIDEO_DBG("vma->vm_start 0x%lx, vma->vm_end 0x%lx, vma->vm_pgoff=0x%llx\n", ++ vma->vm_start, ++ vma->vm_end, ++ vma->vm_pgoff); ++ VIDEO_DBG("(vma->vm_pgoff << PAGE_SHIFT) = 0x%llx\n", (vma->vm_pgoff << PAGE_SHIFT)); ++ for (dw_index = 0; dw_index < MAX_NUM_MEM_TBL; ++dw_index) { ++ if (pmmt[dw_index]) { ++ VIDEO_DBG("index %d, phys_addr=0x%llx, virt_addr=%p, length=0x%x\n", ++ dw_index, ++ pmmt[dw_index]->mem_phys, ++ pmmt[dw_index]->pvVirtualAddr, ++ pmmt[dw_index]->dwLength); ++ if ((vma->vm_pgoff << PAGE_SHIFT) == pmmt[dw_index]->mem_phys) { ++ found = 1; ++ if (size > pmmt[dw_index]->dwLength) { ++ pr_err("required size exceed alloc size\n"); ++ return -EAGAIN; ++ } ++ break; ++ } ++ } ++ } ++ if (!found) { ++ pr_err("no match mem entry\n"); ++ return -EAGAIN; ++ } ++ ++ vm_flags_set(vma, VM_IO); ++ if (pAstRVAS->config->version == 7) ++ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); ++ else ++ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); ++ ++ if (io_remap_pfn_range(vma, vma->vm_start, ++ ((u32)vma->vm_pgoff), size, ++ vma->vm_page_prot)) { ++ pr_err("remap_pfn_range fail at %s()\n", __func__); ++ return -EAGAIN; ++ } ++ ++ return 0; ++} ++ ++static int video_open(struct inode *pin, struct file *file) ++{ ++ struct AstRVAS *pAstRVAS = file_ast_rvas(file); ++ ++ VIDEO_DBG("\n"); ++ ++ // make sure the rvas clk is running. ++ // if it's already enabled, clk_enable will just return. 
++ clk_enable(pAstRVAS->rvasclk); ++ ++ return 0; ++} ++ ++void free_all_mem_entries(struct AstRVAS *pAstRVAS) ++{ ++ u32 dw_index; ++ struct MemoryMapTable **pmmt = pAstRVAS->ppmmtMemoryTable; ++ void *virt_add; ++ dma_addr_t dw_phys; ++ u32 len; ++ ++ VIDEO_DBG("Removing mem map entries...\n"); ++ for (dw_index = 0; dw_index < MAX_NUM_MEM_TBL; ++dw_index) { ++ if (pmmt[dw_index]) { ++ if (pmmt[dw_index]->mem_phys) { ++ virt_add = get_virt_add_rsvd_mem(dw_index, pAstRVAS); ++ dw_phys = get_phys_add_rsvd_mem(dw_index, pAstRVAS); ++ len = get_len_rsvd_mem(dw_index, pAstRVAS); ++ dma_free_coherent(pAstRVAS->pdev, len, virt_add, dw_phys); ++ } ++ pmmt[dw_index]->pf = NULL; ++ kfree(pmmt[dw_index]); ++ pmmt[dw_index] = NULL; ++ } ++ } ++} ++ ++static int video_release(struct inode *inode, struct file *file) ++{ ++ u32 dw_index; ++ struct AstRVAS *pAstRVAS = file_ast_rvas(file); ++ struct ContextTable **ppctContextTable = pAstRVAS->ppctContextTable; ++ ++ VIDEO_DBG("Start\n"); ++ ++ free_all_mem_entries(pAstRVAS); ++ ++ VIDEO_DBG("ppctContextTable: 0x%p\n", ppctContextTable); ++ ++ disable_grce_tse_interrupt(pAstRVAS); ++ ++ for (dw_index = 0; dw_index < MAX_NUM_CONTEXT; ++dw_index) { ++ if (ppctContextTable[dw_index]) { ++ VIDEO_DBG("Releasing Context dw_index: %u\n", dw_index); ++ kfree(ppctContextTable[dw_index]); ++ ppctContextTable[dw_index] = NULL; ++ } ++ } ++ enable_grce_tse_interrupt(pAstRVAS); ++ VIDEO_DBG("End\n"); ++ ++ return 0; ++} ++ ++static struct file_operations video_module_ops = { .compat_ioctl = video_ioctl, ++ .unlocked_ioctl = video_ioctl, .open = video_open, .release = ++ video_release, .mmap = video_mmap, .owner = THIS_MODULE, }; ++ ++static struct miscdevice video_misc = { .minor = MISC_DYNAMIC_MINOR, .name = ++ RVAS_DRIVER_NAME, .fops = &video_module_ops, }; ++ ++void ioctl_new_context(struct file *file, struct RvasIoctl *pri, struct AstRVAS *pAstRVAS) ++{ ++ struct ContextTable *pct; ++ ++ VIDEO_DBG("Start\n"); ++ pct = get_new_context_table_entry(pAstRVAS); ++ ++ if (pct) { ++ pct->desc_virt = dma_alloc_coherent(pAstRVAS->pdev, PAGE_SIZE, (dma_addr_t *)&pct->desc_phy, GFP_KERNEL); ++ if (!pct->desc_virt) { ++ pri->rs = MemoryAllocError; ++ return; ++ } ++ pri->rc = pct->rc; ++ } else { ++ pri->rs = MemoryAllocError; ++ } ++ ++ VIDEO_DBG("end: return status: %d\n", pri->rs); ++} ++ ++void ioctl_delete_context(struct RvasIoctl *pri, struct AstRVAS *pAstRVAS) ++{ ++ VIDEO_DBG("Start\n"); ++ ++ VIDEO_DBG("pri->rc: %d\n", pri->rc); ++ if (remove_context_table_entry(pri->rc, pAstRVAS)) { ++ VIDEO_DBG("Success in removing\n"); ++ pri->rs = SuccessStatus; ++ } else { ++ VIDEO_DBG("Failed in removing\n"); ++ pri->rs = InvalidMemoryHandle; ++ } ++} ++ ++int get_mem_entry(struct AstRVAS *pAstRVAS) ++{ ++ int index = 0; ++ u32 dw_size = 0; ++ bool found = false; ++ ++ down(&pAstRVAS->mem_sem); ++ do { ++ if (pAstRVAS->ppmmtMemoryTable[index]) { ++ index++; ++ } else { ++ found = true; ++ break; ++ } ++ ++ } while (!found && (index < MAX_NUM_MEM_TBL)); ++ ++ if (found) { ++ dw_size = sizeof(struct MemoryMapTable); ++ pAstRVAS->ppmmtMemoryTable[index] = kmalloc(dw_size, GFP_KERNEL); ++ if (!pAstRVAS->ppmmtMemoryTable[index]) ++ index = -1; ++ } else { ++ index = -1; ++ } ++ ++ up(&pAstRVAS->mem_sem); ++ return index; ++} ++ ++bool delete_mem_entry(const void *crmh, struct AstRVAS *pAstRVAS) ++{ ++ bool b_ret = false; ++ u32 dw_index = (u32)crmh; ++ ++ VIDEO_DBG("Start, dw_index: %#x\n", dw_index); ++ ++ down(&pAstRVAS->mem_sem); ++ if (dw_index < MAX_NUM_MEM_TBL && 
pAstRVAS->ppmmtMemoryTable[dw_index]) { ++ VIDEO_DBG("mem: 0x%p\n", pAstRVAS->ppmmtMemoryTable[dw_index]); ++ kfree(pAstRVAS->ppmmtMemoryTable[dw_index]); ++ pAstRVAS->ppmmtMemoryTable[dw_index] = NULL; ++ b_ret = true; ++ } ++ up(&pAstRVAS->mem_sem); ++ VIDEO_DBG("End\n"); ++ return b_ret; ++} ++ ++void *get_virt_add_rsvd_mem(u32 index, struct AstRVAS *pAstRVAS) ++{ ++ if (index < MAX_NUM_MEM_TBL && pAstRVAS->ppmmtMemoryTable[index]) ++ return pAstRVAS->ppmmtMemoryTable[index]->pvVirtualAddr; ++ ++ return 0; ++} ++ ++dma_addr_t get_phys_add_rsvd_mem(u32 index, struct AstRVAS *pAstRVAS) ++{ ++ if (index < MAX_NUM_MEM_TBL && pAstRVAS->ppmmtMemoryTable[index]) ++ return pAstRVAS->ppmmtMemoryTable[index]->mem_phys; ++ ++ return 0; ++} ++ ++u32 get_len_rsvd_mem(u32 index, struct AstRVAS *pAstRVAS) ++{ ++ u32 len = 0; ++ ++ if (index < MAX_NUM_MEM_TBL && pAstRVAS->ppmmtMemoryTable[index]) ++ len = pAstRVAS->ppmmtMemoryTable[index]->dwLength; ++ ++ return len; ++} ++ ++bool virt_is_valid_rsvd_mem(u32 index, u32 size, struct AstRVAS *pAstRVAS) ++{ ++ if (index < MAX_NUM_MEM_TBL && ++ pAstRVAS->ppmmtMemoryTable[index] && ++ pAstRVAS->ppmmtMemoryTable[index]->dwLength) ++ return true; ++ ++ return false; ++} ++ ++void ioctl_alloc(struct file *file, struct RvasIoctl *pri, struct AstRVAS *pAstRVAS) ++{ ++ u32 size; ++ dma_addr_t phys_add = 0; ++ void *virt_add = 0; ++ u32 index = get_mem_entry(pAstRVAS); ++ ++ if (index < 0 || index >= MAX_NUM_MEM_TBL) { ++ pri->rs = MemoryAllocError; ++ return; ++ } ++ if (pri->req_mem_size < PAGE_SIZE) ++ pri->req_mem_size = PAGE_SIZE; ++ ++ size = pri->req_mem_size; ++ ++ VIDEO_DBG("Allocating memory size: 0x%x\n", size); ++ virt_add = dma_alloc_coherent(pAstRVAS->pdev, size, &phys_add, ++ GFP_KERNEL); ++ if (virt_add) { ++ pri->rmh = (void *)index; ++ pri->rvb.pv = (void *)phys_add; ++ pri->rvb.cb = size; ++ pri->rs = SuccessStatus; ++ pAstRVAS->ppmmtMemoryTable[index]->pf = file; ++ pAstRVAS->ppmmtMemoryTable[index]->mem_phys = phys_add; ++ pAstRVAS->ppmmtMemoryTable[index]->pvVirtualAddr = (void *)virt_add; ++ pAstRVAS->ppmmtMemoryTable[index]->dwLength = size; ++ pAstRVAS->ppmmtMemoryTable[index]->byDmaAlloc = 1; ++ } else { ++ if (pAstRVAS->ppmmtMemoryTable[index]) ++ delete_mem_entry((void *)index, pAstRVAS); ++ ++ pr_err("Cannot alloc video destination data buffer\n"); ++ pri->rs = MemoryAllocError; ++ } ++ VIDEO_DBG("Allocated: index: 0x%x phys: %llx cb: 0x%x\n", index, ++ phys_add, pri->rvb.cb); ++} ++ ++void ioctl_free(struct RvasIoctl *pri, struct AstRVAS *pAstRVAS) ++{ ++ void *virt_add = get_virt_add_rsvd_mem((u32)pri->rmh, pAstRVAS); ++ dma_addr_t dw_phys = get_phys_add_rsvd_mem((u32)pri->rmh, pAstRVAS); ++ u32 len = get_len_rsvd_mem((u32)pri->rmh, pAstRVAS); ++ ++ VIDEO_DBG("Start\n"); ++ VIDEO_DBG("Freeing: rmh: 0x%p, phys: 0x%x, size 0x%x virt_add: 0x%p len: %u\n", ++ pri->rmh, dw_phys, pri->rvb.cb, virt_add, len); ++ ++ delete_mem_entry(pri->rmh, pAstRVAS); ++ VIDEO_DBG("After delete_mem_entry\n"); ++ ++ dma_free_coherent(pAstRVAS->pdev, len, ++ virt_add, ++ dw_phys); ++ VIDEO_DBG("After dma_free_coherent\n"); ++} ++ ++//AST2700 has both VGA output and DP out. ++//AST2750 has VGA output for host node 0/VGA0 and DP output for host node 1/VGA1. 
++void ioctl_update_lms_2700(u8 lms_on, struct AstRVAS *pAstRVAS) ++{ ++ u32 reg_scu000 = 0; ++ u32 reg_scu448 = 0; ++ u32 reg_scu0C0 = 0; ++ u32 reg_scu0D0 = 0; ++ u32 reg_dptx100 = 0; ++ u32 reg_dptx104 = 0; ++ u32 chip_efuse_option = 0; ++ u32 vga_crt_disbl = 0; ++ u32 vga_pwr_off_vdac = 0; ++ ++ if (pAstRVAS->rvas_index == 0x0) { ++ vga_crt_disbl = VGA0_CRT_DISBL; ++ vga_pwr_off_vdac = VGA0_PWR_OFF_VDAC; ++ } else { ++ vga_crt_disbl = VGA1_CRT_DISBL; ++ vga_pwr_off_vdac = VGA1_PWR_OFF_VDAC; ++ } ++ ++ regmap_read(pAstRVAS->scu, SCU000_Silicon_Revision_ID, &reg_scu000); ++ chip_efuse_option = (reg_scu000 & 0xff00) >> 8; ++ regmap_read(pAstRVAS->scu_io, SCU448_Pin_Ctrl, &reg_scu448); ++ regmap_read(pAstRVAS->scu, SCU0C0_Misc1_Ctrl, &reg_scu0C0); ++ regmap_read(pAstRVAS->scu_io, SCU0D0_Misc3_Ctrl, &reg_scu0D0); ++ if ((chip_efuse_option == 0 && pAstRVAS->rvas_index == 0x1) || chip_efuse_option == 1) { ++ if (pAstRVAS->dp_base) { ++ reg_dptx100 = readl(pAstRVAS->dp_base + DPTX_Configuration_Register); ++ reg_dptx104 = readl(pAstRVAS->dp_base + DPTX_PHY_Configuration_Register); ++ } ++ } ++ ++ if (lms_on) { ++ if ((reg_scu448 & VGAVS_ENBL_27) == 0 && (reg_scu448 & VGAHS_ENBL_27) == 0) { ++ reg_scu448 |= (VGAVS_ENBL_27 | VGAHS_ENBL_27); ++ regmap_write(pAstRVAS->scu_io, SCU448_Pin_Ctrl, reg_scu448); ++ } ++ if (reg_scu0C0 & vga_crt_disbl) { ++ reg_scu0C0 &= ~vga_crt_disbl; ++ regmap_write(pAstRVAS->scu, SCU0C0_Misc1_Ctrl, reg_scu0C0); ++ } ++ if (reg_scu0D0 & vga_pwr_off_vdac) { ++ reg_scu0D0 &= ~vga_pwr_off_vdac; ++ regmap_write(pAstRVAS->scu_io, SCU0D0_Misc3_Ctrl, reg_scu0D0); ++ } ++ //dp output ++ if (pAstRVAS->dp_base) { ++ reg_dptx100 |= 1 << AUX_RESETN; ++ writel(reg_dptx100, pAstRVAS->dp_base + DPTX_Configuration_Register); ++ } ++ } else { //turn off ++ if ((reg_scu448 & VGAVS_ENBL_27) == 1 || (reg_scu448 & VGAHS_ENBL_27) == 1) { ++ reg_scu448 &= ~(VGAVS_ENBL_27 | VGAHS_ENBL_27); ++ regmap_write(pAstRVAS->scu_io, SCU448_Pin_Ctrl, reg_scu448); ++ } ++ if (!(reg_scu0C0 & vga_crt_disbl)) { ++ reg_scu0C0 |= vga_crt_disbl; ++ regmap_write(pAstRVAS->scu, SCU0C0_Misc1_Ctrl, reg_scu0C0); ++ } ++ if (!(reg_scu0D0 & vga_pwr_off_vdac)) { ++ reg_scu0D0 |= vga_pwr_off_vdac; ++ regmap_write(pAstRVAS->scu_io, SCU0D0_Misc3_Ctrl, reg_scu0D0); ++ } ++ //dp output ++ if ((chip_efuse_option == 0 && pAstRVAS->rvas_index == 0x1) || chip_efuse_option == 1) { ++ if (pAstRVAS->dp_base) { ++ reg_dptx100 &= ~(1 << AUX_RESETN); ++ writel(reg_dptx100, pAstRVAS->dp_base + DPTX_Configuration_Register); ++ reg_dptx104 &= ~(1 << DP_TX_I_MAIN_ON); ++ writel(reg_dptx104, pAstRVAS->dp_base + DPTX_PHY_Configuration_Register); ++ } ++ } ++ } ++} ++ ++u32 ioctl_get_lm_status_2700(struct AstRVAS *pAstRVAS) ++{ ++ u32 reg_val = 0; ++ ++ regmap_read(pAstRVAS->scu_io, SCU448_Pin_Ctrl, &reg_val); ++ if ((reg_val & VGAVS_ENBL_27) == 1 || (reg_val & VGAHS_ENBL_27) == 1) { ++ regmap_read(pAstRVAS->scu, SCU0C0_Misc1_Ctrl, &reg_val); ++ if (pAstRVAS->rvas_index == 0x0) { ++ if (!(reg_val & VGA0_CRT_DISBL)) { ++ regmap_read(pAstRVAS->scu_io, SCU0D0_Misc3_Ctrl, &reg_val); ++ if (!(reg_val & VGA0_PWR_OFF_VDAC)) ++ return 1; ++ } ++ } else { ++ if (!(reg_val & VGA1_CRT_DISBL)) { ++ regmap_read(pAstRVAS->scu_io, SCU0D0_Misc3_Ctrl, &reg_val); ++ if (!(reg_val & VGA1_PWR_OFF_VDAC)) ++ return 1; ++ } ++ } ++ } ++ return 0; ++} ++ ++void ioctl_update_lms(u8 lms_on, struct AstRVAS *pAstRVAS) ++{ ++ u32 reg_scu418 = 0; ++ u32 reg_scu0C0 = 0; ++ u32 reg_scu0D0 = 0; ++ u32 reg_dptx100 = 0; ++ u32 reg_dptx104 = 0; ++ ++ regmap_read(pAstRVAS->scu, SCU418_Pin_Ctrl, 
&reg_scu418); ++ regmap_read(pAstRVAS->scu, SCU0C0_Misc1_Ctrl, &reg_scu0C0); ++ regmap_read(pAstRVAS->scu, SCU0D0_Misc3_Ctrl, &reg_scu0D0); ++ if (pAstRVAS->dp_base) { ++ reg_dptx100 = readl(pAstRVAS->dp_base + DPTX_Configuration_Register); ++ reg_dptx104 = readl(pAstRVAS->dp_base + DPTX_PHY_Configuration_Register); ++ } ++ ++ if (lms_on) { ++ if (!(reg_scu418 & (VGAVS_ENBL | VGAHS_ENBL))) { ++ reg_scu418 |= (VGAVS_ENBL | VGAHS_ENBL); ++ regmap_write(pAstRVAS->scu, SCU418_Pin_Ctrl, reg_scu418); ++ } ++ if (reg_scu0C0 & VGA_CRT_DISBL) { ++ reg_scu0C0 &= ~VGA_CRT_DISBL; ++ regmap_write(pAstRVAS->scu, SCU0C0_Misc1_Ctrl, reg_scu0C0); ++ } ++ if (reg_scu0D0 & PWR_OFF_VDAC) { ++ reg_scu0D0 &= ~PWR_OFF_VDAC; ++ regmap_write(pAstRVAS->scu, SCU0D0_Misc3_Ctrl, reg_scu0D0); ++ } ++ //dp output ++ if (pAstRVAS->dp_base) { ++ reg_dptx100 |= 1 << AUX_RESETN; ++ writel(reg_dptx100, pAstRVAS->dp_base + DPTX_Configuration_Register); ++ } ++ } else { //turn off ++ if (reg_scu418 & (VGAVS_ENBL | VGAHS_ENBL)) { ++ reg_scu418 &= ~(VGAVS_ENBL | VGAHS_ENBL); ++ regmap_write(pAstRVAS->scu, SCU418_Pin_Ctrl, reg_scu418); ++ } ++ if (!(reg_scu0C0 & VGA_CRT_DISBL)) { ++ reg_scu0C0 |= VGA_CRT_DISBL; ++ regmap_write(pAstRVAS->scu, SCU0C0_Misc1_Ctrl, reg_scu0C0); ++ } ++ if (!(reg_scu0D0 & PWR_OFF_VDAC)) { ++ reg_scu0D0 |= PWR_OFF_VDAC; ++ regmap_write(pAstRVAS->scu, SCU0D0_Misc3_Ctrl, reg_scu0D0); ++ } ++ //dp output ++ if (pAstRVAS->dp_base) { ++ reg_dptx100 &= ~(1 << AUX_RESETN); ++ writel(reg_dptx100, pAstRVAS->dp_base + DPTX_Configuration_Register); ++ reg_dptx104 &= ~(1 << DP_TX_I_MAIN_ON); ++ writel(reg_dptx104, pAstRVAS->dp_base + DPTX_PHY_Configuration_Register); ++ } ++ } ++} ++ ++u32 ioctl_get_lm_status(struct AstRVAS *pAstRVAS) ++{ ++ u32 reg_val = 0; ++ ++ regmap_read(pAstRVAS->scu, SCU418_Pin_Ctrl, &reg_val); ++ if (reg_val & (VGAVS_ENBL | VGAHS_ENBL)) { ++ regmap_read(pAstRVAS->scu, SCU0C0_Misc1_Ctrl, &reg_val); ++ if (!(reg_val & VGA_CRT_DISBL)) ++ regmap_read(pAstRVAS->scu, SCU0D0_Misc3_Ctrl, &reg_val); ++ if (!(reg_val & PWR_OFF_VDAC)) ++ return 1; ++ } ++ return 0; ++} ++ ++void init_osr_es(struct AstRVAS *pAstRVAS) ++{ ++ VIDEO_DBG("Start\n"); ++ sema_init(&pAstRVAS->mem_sem, 1); ++ sema_init(&pAstRVAS->context_sem, 1); ++ ++ video_os_init_sleep_struct(&pAstRVAS->video_wait); ++ ++ memset(&pAstRVAS->tfe_engine, 0x00, sizeof(struct EngineInfo)); ++ memset(&pAstRVAS->bse_engine, 0x00, sizeof(struct EngineInfo)); ++ memset(&pAstRVAS->ldma_engine, 0x00, sizeof(struct EngineInfo)); ++ sema_init(&pAstRVAS->tfe_engine.sem, 1); ++ sema_init(&pAstRVAS->bse_engine.sem, 1); ++ sema_init(&pAstRVAS->ldma_engine.sem, 1); ++ video_os_init_sleep_struct(&pAstRVAS->tfe_engine.wait); ++ video_os_init_sleep_struct(&pAstRVAS->bse_engine.wait); ++ video_os_init_sleep_struct(&pAstRVAS->ldma_engine.wait); ++ ++ memset(pAstRVAS->ppctContextTable, 0x00, MAX_NUM_CONTEXT * sizeof(u32)); ++ pAstRVAS->dwMemoryTableSize = MAX_NUM_MEM_TBL; ++ memset(pAstRVAS->ppmmtMemoryTable, 0x00, MAX_NUM_MEM_TBL * sizeof(u32)); ++ VIDEO_DBG("End\n"); ++} ++ ++void release_osr_es(struct AstRVAS *pAstRVAS) ++{ ++ u32 dw_index; ++ struct ContextTable **ppctContextTable = pAstRVAS->ppctContextTable; ++ ++ VIDEO_DBG("Removing contexts...\n"); ++ for (dw_index = 0; dw_index < MAX_NUM_CONTEXT; ++dw_index) { ++ //if (ppctContextTable[dw_index]) { ++ kfree(ppctContextTable[dw_index]); ++ ppctContextTable[dw_index] = NULL; ++ //} kfree(NULL) is safe and this check is probably not required ++ } ++ ++ free_all_mem_entries(pAstRVAS); ++} ++ ++//Retrieve a context entry 
++struct ContextTable *get_context_entry(const void *crc, struct AstRVAS *pAstRVAS) ++{ ++ struct ContextTable *pct = NULL; ++ u32 dw_index = (u32)crc; ++ struct ContextTable **ppctContextTable = pAstRVAS->ppctContextTable; ++ ++ if (dw_index < MAX_NUM_CONTEXT && ppctContextTable[dw_index] && ++ ppctContextTable[dw_index]->rc == crc) ++ pct = ppctContextTable[dw_index]; ++ ++ return pct; ++} ++ ++struct ContextTable *get_new_context_table_entry(struct AstRVAS *pAstRVAS) ++{ ++ struct ContextTable *pct = NULL; ++ u32 dw_index = 0; ++ bool b_found = false; ++ u32 dw_size = 0; ++ struct ContextTable **ppctContextTable = pAstRVAS->ppctContextTable; ++ ++ disable_grce_tse_interrupt(pAstRVAS); ++ down(&pAstRVAS->context_sem); ++ while (!b_found && (dw_index < MAX_NUM_CONTEXT)) { ++ if (!(ppctContextTable[dw_index])) ++ b_found = true; ++ else ++ ++dw_index; ++ } ++ if (b_found) { ++ dw_size = sizeof(struct ContextTable); ++ pct = kmalloc(dw_size, GFP_KERNEL); ++ ++ if (pct) { ++ memset(pct, 0x00, sizeof(struct ContextTable)); ++ pct->rc = (void *)dw_index; ++ memset(&pct->aqwSnoopMap, 0xff, ++ sizeof(pct->aqwSnoopMap)); ++ memset(&pct->sa, 0xff, sizeof(pct->sa)); ++ ppctContextTable[dw_index] = pct; ++ } ++ } ++ up(&pAstRVAS->context_sem); ++ enable_grce_tse_interrupt(pAstRVAS); ++ ++ return pct; ++} ++ ++bool remove_context_table_entry(const void *crc, struct AstRVAS *pAstRVAS) ++{ ++ bool b_ret = false; ++ u32 dw_index = (u32)crc; ++ struct ContextTable *ctx_entry; ++ ++ VIDEO_DBG("Start\n"); ++ ++ VIDEO_DBG("dw_index: %u\n", dw_index); ++ ++ if (dw_index < MAX_NUM_CONTEXT) { ++ ctx_entry = pAstRVAS->ppctContextTable[dw_index]; ++ VIDEO_DBG("ctx_entry: 0x%p\n", ctx_entry); ++ ++ if (ctx_entry) { ++ disable_grce_tse_interrupt(pAstRVAS); ++ if (!ctx_entry->desc_virt) { ++ VIDEO_DBG("Removing memory, virt: 0x%p phys: %#x\n", ++ ctx_entry->desc_virt, ++ ctx_entry->desc_phy); ++ ++ dma_free_coherent(pAstRVAS->pdev, PAGE_SIZE, ctx_entry->desc_virt, ctx_entry->desc_phy); ++ } ++ VIDEO_DBG("Removing memory: 0x%p\n", ctx_entry); ++ pAstRVAS->ppctContextTable[dw_index] = NULL; ++ kfree(ctx_entry); ++ b_ret = true; ++ enable_grce_tse_interrupt(pAstRVAS); ++ } ++ } ++ return b_ret; ++} ++ ++void display_event_map(const struct EventMap *pem) ++{ ++ VIDEO_DBG("EM:\n"); ++ VIDEO_DBG("*************************\n"); ++ VIDEO_DBG(" bATTRChanged= %u\n", pem->bATTRChanged); ++ VIDEO_DBG(" bCRTCChanged= %u\n", pem->bCRTCChanged); ++ VIDEO_DBG(" bCRTCEXTChanged= %u\n", pem->bCRTCEXTChanged); ++ VIDEO_DBG(" bDoorbellA= %u\n", pem->bDoorbellA); ++ VIDEO_DBG(" bDoorbellB= %u\n", pem->bDoorbellB); ++ VIDEO_DBG(" bGCTLChanged= %u\n", pem->bGCTLChanged); ++ VIDEO_DBG(" bGeometryChanged= %u\n", pem->bGeometryChanged); ++ VIDEO_DBG(" bPLTRAMChanged= %u\n", pem->bPLTRAMChanged); ++ VIDEO_DBG(" bPaletteChanged= %u\n", pem->bPaletteChanged); ++ VIDEO_DBG(" bSEQChanged= %u\n", pem->bSEQChanged); ++ VIDEO_DBG(" bSnoopChanged= %u\n", pem->bSnoopChanged); ++ VIDEO_DBG(" bTextASCIIChanged= %u\n", pem->bTextASCIIChanged); ++ VIDEO_DBG(" bTextATTRChanged= %u\n", pem->bTextATTRChanged); ++ VIDEO_DBG(" bTextFontChanged= %u\n", pem->bTextFontChanged); ++ VIDEO_DBG(" bXCURCOLChanged= %u\n", pem->bXCURCOLChanged); ++ VIDEO_DBG(" bXCURCTLChanged= %u\n", pem->bXCURCTLChanged); ++ VIDEO_DBG(" bXCURPOSChanged= %u\n", pem->bXCURPOSChanged); ++ VIDEO_DBG("*************************\n"); ++} ++ ++void ioctl_wait_for_video_event(struct RvasIoctl *ri, struct AstRVAS *pAstRVAS) ++{ ++ union EmDwordUnion eduRequested; ++ union EmDwordUnion 
eduReturned; ++ union EmDwordUnion eduChanged; ++ struct EventMap anEm; ++ u32 result = 1; ++ int iTimerRemaining = ri->time_out; ++ unsigned long ulTimeStart, ulTimeEnd, ulElapsedTime; ++ struct ContextTable **ppctContextTable = pAstRVAS->ppctContextTable; ++ ++ memset(&anEm, 0x0, sizeof(struct EventMap)); ++ ++ VIDEO_DBG("Calling VideoSleepOnTimeout\n"); ++ ++ eduRequested.em = ri->em; ++ VIDEO_DBG("eduRequested.em:\n"); ++ //display_event_map(&eduRequested.em); ++ eduChanged.em = ppctContextTable[(int)ri->rc]->emEventReceived; ++ VIDEO_DBG("eduChanged.em:\n"); ++ //display_event_map(&eduChanged.em); ++ ++ // While event has not occurred and there is still time remaining for wait ++ while (!(eduChanged.dw & eduRequested.dw) && (iTimerRemaining > 0) && ++ result) { ++ pAstRVAS->video_intr_occurred = 0; ++ ulTimeStart = jiffies_to_msecs(jiffies); ++ result = video_os_sleep_on_timeout(&pAstRVAS->video_wait, ++ &pAstRVAS->video_intr_occurred, ++ iTimerRemaining); ++ ulTimeEnd = jiffies_to_msecs(jiffies); ++ ulElapsedTime = (ulTimeEnd - ulTimeStart); ++ iTimerRemaining -= (int)ulElapsedTime; ++ eduChanged.em = ppctContextTable[(int)ri->rc]->emEventReceived; ++// VIDEO_DBG("Elapsedtime [%u], timestart[%u], timeend[%u]\n", dwElapsedTime, dwTimeStart, dwTimeEnd); ++ ++ VIDEO_DBG("ulElapsedTime [%lu], ulTimeStart[%lu], ulTimeEnd[%lu]\n", ++ ulElapsedTime, ulTimeStart, ulTimeEnd); ++ VIDEO_DBG("HZ [%ul]\n", HZ); ++ VIDEO_DBG("result [%u], iTimerRemaining [%d]\n", result, ++ iTimerRemaining); ++ } ++ ++ if (result == 0 && ri->time_out != 0) { ++ VIDEO_DBG("IOCTL Timedout\n"); ++ ri->rs = TimedOut; ++ memset(&ri->em, 0x0, sizeof(struct EventMap)); ++ } else { ++ eduChanged.em = ppctContextTable[(int)ri->rc]->emEventReceived; ++ VIDEO_DBG("Event Received[%X]\n", eduChanged.dw); ++ // Mask out the changes we are waiting on ++ eduReturned.dw = eduChanged.dw & eduRequested.dw; ++ ++ // Reset flags of changes that have been returned ++ eduChanged.dw &= ~(eduReturned.dw); ++ VIDEO_DBG("Event Reset[%X]\n", eduChanged.dw); ++ ppctContextTable[(int)ri->rc]->emEventReceived = eduChanged.em; ++ ++ // Copy changes back to ri ++ ri->em = eduReturned.em; ++ VIDEO_DBG("ri->em:\n"); ++ //display_event_map(&ri->em); ++ ri->rs = SuccessStatus; ++ VIDEO_DBG("Success [%x]\n", ++ eduReturned.dw); ++ } ++} ++ ++static void update_context_events(struct AstRVAS *pAstRVAS, ++ union EmDwordUnion eduFge_status) ++{ ++ union EmDwordUnion eduEmReceived; ++ u32 dwIter = 0; ++ struct ContextTable **ppctContextTable = pAstRVAS->ppctContextTable; ++ // VIDEO_DBG("Setting up context\n"); ++ for (dwIter = 0; dwIter < MAX_NUM_CONTEXT; ++dwIter) { ++ if (ppctContextTable[dwIter]) { ++ // VIDEO_DBG ("Copying EventMap to RVAS Context\n"); ++ memcpy((void *)&eduEmReceived, ++ (void *)&ppctContextTable[dwIter]->emEventReceived, ++ sizeof(union EmDwordUnion)); ++ eduEmReceived.dw |= eduFge_status.dw; ++ memcpy((void *)&ppctContextTable[dwIter]->emEventReceived, ++ (void *)&eduEmReceived, ++ sizeof(union EmDwordUnion)); ++ } ++ } ++ pAstRVAS->video_intr_occurred = 1; ++ video_ss_wakeup_on_timeout(&pAstRVAS->video_wait); ++} ++ ++static irqreturn_t fge_handler(int irq, void *dev_id) ++{ ++ union EmDwordUnion eduFge_status; ++ u32 tse_sts = 0; ++ u32 dwGRCEStatus = 0; ++ bool bFgeItr = false; ++ bool bTfeItr = false; ++ bool bBSEItr = false; ++ bool bLdmaItr = false; ++ bool vg_changed = false; ++ u32 dw_screen_offset = 0; ++ struct AstRVAS *pAstRVAS = (struct AstRVAS *)dev_id; ++ struct VideoGeometry *cur_vg = NULL; ++ ++ 
memset(&eduFge_status, 0x0, sizeof(union EmDwordUnion)); ++ bFgeItr = false; ++ ++ // Checking for GRC status changes ++ dwGRCEStatus = readl(pAstRVAS->grce_reg_base + GRCE_STATUS_REGISTER); ++ if (dwGRCEStatus & GRC_INT_STS_MASK) { ++ VIDEO_DBG("GRC Status Changed: %#x\n", dwGRCEStatus); ++ eduFge_status.dw |= dwGRCEStatus & GRC_INT_STS_MASK; ++ bFgeItr = true; ++ ++ if (dwGRCEStatus & 0x30) { ++ dw_screen_offset = get_screen_offset(pAstRVAS); ++ ++ if (pAstRVAS->dwScreenOffset != dw_screen_offset) { ++ pAstRVAS->dwScreenOffset = dw_screen_offset; ++ vg_changed = true; ++ } ++ } ++ } ++ vg_changed |= video_geometry_change(pAstRVAS, dwGRCEStatus); ++ if (vg_changed) { ++ eduFge_status.em.bGeometryChanged = true; ++ bFgeItr = true; ++ set_snoop_engine(vg_changed, pAstRVAS); ++ video_set_Window(pAstRVAS); ++ VIDEO_DBG("Geometry has changed\n"); ++ VIDEO_DBG("Reconfigure TSE\n"); ++ } ++ // Checking and clear TSE Intr Status ++ tse_sts = clear_tse_interrupt(pAstRVAS); ++ ++ if (tse_sts & TSSTS_ALL) { ++ bFgeItr = true; ++ if (tse_sts & (TSSTS_TC_SCREEN0 | TSSTS_TC_SCREEN1)) { ++ eduFge_status.em.bSnoopChanged = 1; ++ cur_vg = &pAstRVAS->current_vg; ++ ++ if (cur_vg->gmt == TextMode) { ++ eduFge_status.em.bTextASCIIChanged = 1; ++ eduFge_status.em.bTextATTRChanged = 1; ++ eduFge_status.em.bTextFontChanged = 1; ++ } ++ } ++ if (tse_sts & TSSTS_ASCII) { ++ //VIDEO_DBG("Text Ascii Changed\n"); ++ eduFge_status.em.bTextASCIIChanged = 1; ++ } ++ ++ if (tse_sts & TSSTS_ATTR) { ++ //VIDEO_DBG("Text Attr Changed\n"); ++ eduFge_status.em.bTextATTRChanged = 1; ++ } ++ ++ if (tse_sts & TSSTS_FONT) { ++ //VIDEO_DBG("Text Font Changed\n"); ++ eduFge_status.em.bTextFontChanged = 1; ++ } ++ } ++ ++ if (clear_ldma_interrupt(pAstRVAS)) { ++ bLdmaItr = true; ++ pAstRVAS->ldma_engine.finished = 1; ++ video_ss_wakeup_on_timeout(&pAstRVAS->ldma_engine.wait); ++ } ++ ++ if (clear_tfe_interrupt(pAstRVAS)) { ++ bTfeItr = true; ++ pAstRVAS->tfe_engine.finished = 1; ++ video_ss_wakeup_on_timeout(&pAstRVAS->tfe_engine.wait); ++ } ++ ++ if (clear_bse_interrupt(pAstRVAS)) { ++ bBSEItr = true; ++ pAstRVAS->bse_engine.finished = 1; ++ video_ss_wakeup_on_timeout(&pAstRVAS->bse_engine.wait); ++ } ++ ++ if (!bFgeItr && !bTfeItr && !bBSEItr && !bLdmaItr) { ++ //VIDEO_DBG(" Unknown Interrupt\n"); ++// VIDEO_DBG("TFE CRT [%#x].", *fge_intr); ++ return IRQ_NONE; ++ } ++ ++ if (bFgeItr) { ++ update_context_events(pAstRVAS, eduFge_status); ++ pAstRVAS->video_intr_occurred = 1; ++ video_ss_wakeup_on_timeout(&pAstRVAS->video_wait); ++ } ++ ++ return IRQ_HANDLED; ++} ++ ++/*Sleep and Wakeup Functions*/ ++ ++void video_os_init_sleep_struct(struct Video_OsSleepStruct *Sleep) ++{ ++ init_waitqueue_head(&Sleep->queue); ++ Sleep->Timeout = 0; ++} ++ ++void video_ss_wakeup_on_timeout(struct Video_OsSleepStruct *Sleep) ++{ ++ /* Wakeup Process and Kill timeout handler */ ++ wake_up(&Sleep->queue); ++} ++ ++long video_os_sleep_on_timeout(struct Video_OsSleepStruct *Sleep, u8 *Var, long msecs) ++{ ++ long timeout; /* In jiffies */ ++ u8 *Condition = Var; ++ /* Sleep on the Condition for a wakeup */ ++ timeout = wait_event_interruptible_timeout(Sleep->queue, ++ (*Condition == 1), ++ msecs_to_jiffies(msecs)); ++ ++ return timeout; ++} ++ ++void disable_video_engines(struct AstRVAS *pAstRVAS) ++{ ++ clk_disable(pAstRVAS->eclk); ++ clk_disable(pAstRVAS->vclk); ++} ++ ++void enable_video_engines(struct AstRVAS *pAstRVAS) ++{ ++ clk_enable(pAstRVAS->eclk); ++ clk_enable(pAstRVAS->vclk); ++} ++ ++void disable_rvas_engines(struct AstRVAS *pAstRVAS) ++{ 
++	clk_disable(pAstRVAS->rvasclk);
++}
++
++void enable_rvas_engines(struct AstRVAS *pAstRVAS)
++{
++	// ast2600 clk enable does
++	//	reset engine reset at SCU040
++	//	delay 100 us
++	//	enable clock at SCU080
++	//	delay 10ms
++	//	disable engine reset at SCU040
++
++	// ast2700 clk enable only enables the clock at SCU240
++	clk_enable(pAstRVAS->rvasclk);
++}
++
++static void reset_rvas_engine(struct AstRVAS *pAstRVAS)
++{
++	disable_rvas_engines(pAstRVAS);
++	if (pAstRVAS->config->version == 7)
++		reset_control_deassert(pAstRVAS->rvas_reset);
++	enable_rvas_engines(pAstRVAS);
++	rvas_init(pAstRVAS);
++}
++
++static void video_on(struct AstRVAS *pAstRVAS)
++{
++	if (pAstRVAS->config->version == 7) {
++		// enable clk
++		regmap_write(pAstRVAS->scu, 0x200, 0x40);
++		mdelay(200);
++		regmap_write(pAstRVAS->scu, 0x244, 0x2);
++		regmap_write(pAstRVAS->scu, 0x244, 0x8);
++		mdelay(100);
++		regmap_write(pAstRVAS->scu, 0x204, 0x40);
++	} else {
++		video_engine_init(pAstRVAS);
++	}
++}
++
++static void video_off(struct AstRVAS *pAstRVAS)
++{
++	if (pAstRVAS->config->version == 7) {
++		disable_video_interrupt(pAstRVAS);
++		// stop clock
++		regmap_write(pAstRVAS->scu, 0x240, 0x2);
++		regmap_write(pAstRVAS->scu, 0x240, 0x8);
++		mdelay(100);
++	} else {
++		disable_video_engines(pAstRVAS);
++		enable_video_engines(pAstRVAS);
++	}
++}
++
++static void reset_video_engine(struct AstRVAS *pAstRVAS)
++{
++	video_off(pAstRVAS);
++	video_on(pAstRVAS);
++}
++
++void ioctl_reset_video_engine(struct RvasIoctl *ri, struct AstRVAS *pAstRVAS)
++{
++	enum ResetEngineMode resetMode = ri ? ri->resetMode : ResetAll; /* ri may be NULL from sysfs */
++
++	switch (resetMode) {
++	case ResetAll:
++		VIDEO_DBG("reset all engine\n");
++		reset_rvas_engine(pAstRVAS);
++		reset_video_engine(pAstRVAS);
++		break;
++	case ResetRvasEngine:
++		VIDEO_DBG("reset rvas engine\n");
++		reset_rvas_engine(pAstRVAS);
++		break;
++	case ResetVeEngine:
++		VIDEO_DBG("reset video engine\n");
++		reset_video_engine(pAstRVAS);
++		break;
++	default:
++		dev_err(pAstRVAS->pdev, "Error resetting: no such mode: %d\n", resetMode);
++		break;
++	}
++
++	if (ri)
++		ri->rs = SuccessStatus;
++}
++
++static ssize_t rvas_reset_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
++{
++	struct AstRVAS *pAstRVAS = dev_get_drvdata(dev);
++	unsigned long val;
++
++	if (kstrtoul(buf, 10, &val))
++		return -EINVAL;
++
++	if (val)
++		ioctl_reset_video_engine(NULL, pAstRVAS);
++
++	return count;
++}
++
++static DEVICE_ATTR_WO(rvas_reset);
++
++static struct attribute *ast_rvas_attributes[] = {
++	&dev_attr_rvas_reset.attr,
++	NULL
++};
++
++static const struct attribute_group rvas_attribute_group = {
++	.attrs = ast_rvas_attributes
++};
++
++bool sleep_on_tfe_busy(struct AstRVAS *pAstRVAS, phys_addr_t desc_addr_phys,
++		       u32 dwTFEControlR, u32 dwTFERleLimitor,
++		       u32 *pdwRLESize, u32 *pdwCheckSum)
++{
++	void __iomem *addrTFEDTBR = pAstRVAS->fg_reg_base + TFE_Descriptor_Table_Offset;
++	void __iomem *addrTFECR = pAstRVAS->fg_reg_base + TFE_Descriptor_Control_Resgister;
++	void __iomem *addrTFERleL = pAstRVAS->fg_reg_base + TFE_RLE_LIMITOR;
++	void __iomem *addrTFERSTS = pAstRVAS->fg_reg_base + TFE_Status_Register;
++	bool bResult = true;
++
++	down(&pAstRVAS->tfe_engine.sem);
++	VIDEO_DBG("In Busy Semaphore......\n");
++
++	VIDEO_DBG("Before change, TFECR: %#x\n", readl(addrTFECR));
++	writel(dwTFEControlR, addrTFECR);
++	VIDEO_DBG("After change, TFECR: %#x\n", readl(addrTFECR));
++	writel(dwTFERleLimitor, addrTFERleL);
++	VIDEO_DBG("dwTFEControlR: %#x\n", dwTFEControlR);
++	VIDEO_DBG("dwTFERleLimitor: %#x\n", dwTFERleLimitor);
++
VIDEO_DBG("desc_addr_phys: %#x\n", desc_addr_phys); ++ // put descriptor add to TBR and Fetch start ++ writel((u32)desc_addr_phys, addrTFEDTBR); ++ //wTFETiles = 1; ++ pAstRVAS->tfe_engine.finished = 0; ++ video_os_sleep_on_timeout(&pAstRVAS->tfe_engine.wait, ++ &pAstRVAS->tfe_engine.finished, ++ TFE_TIMEOUT_IN_MS); ++ ++ if (!pAstRVAS->tfe_engine.finished) { ++ dev_err(pAstRVAS->pdev, "Video TFE failed\n"); ++ writel(0x00, addrTFERSTS); ++ pAstRVAS->tfe_engine.finished = 1; ++ bResult = false; ++ } ++ ++ writel((readl(addrTFECR) & (~0x3)), addrTFECR); // Disable IRQ and Turn off TFE when done ++ *pdwRLESize = readl(pAstRVAS->fg_reg_base + TFE_RLE_Byte_Count); ++ *pdwCheckSum = readl(pAstRVAS->fg_reg_base + TFE_RLE_CheckSum); ++ ++ up(&pAstRVAS->tfe_engine.sem); ++ VIDEO_DBG("Done Busy: bResult: %d\n", bResult); ++ ++ return bResult; ++} ++ ++bool sleep_on_tfe_text_busy(struct AstRVAS *pAstRVAS, phys_addr_t desc_addr_phys, ++ u32 dwTFEControlR, u32 dwTFERleLimitor, u32 *pdwRLESize, ++ u32 *pdwCheckSum) ++{ ++ void __iomem *addrTFEDTBR = pAstRVAS->fg_reg_base + TFE_Descriptor_Table_Offset; ++ void __iomem *addrTFECR = pAstRVAS->fg_reg_base + TFE_Descriptor_Control_Resgister; ++ void __iomem *addrTFERleL = pAstRVAS->fg_reg_base + TFE_RLE_LIMITOR; ++ void __iomem *addrTFERSTS = pAstRVAS->fg_reg_base + TFE_Status_Register; ++ bool bResult = true; ++ ++ down(&pAstRVAS->tfe_engine.sem); ++ VIDEO_DBG("In Busy Semaphore......\n"); ++ ++ VIDEO_DBG("Before change, TFECR: %#x\n", readl(addrTFECR)); ++ writel(dwTFEControlR, addrTFECR); ++ VIDEO_DBG("After change, TFECR: %#x\n", readl(addrTFECR)); ++ writel(dwTFERleLimitor, addrTFERleL); ++ VIDEO_DBG("dwTFEControlR: %#x\n", dwTFEControlR); ++ VIDEO_DBG("dwTFERleLimitor: %#x\n", dwTFERleLimitor); ++ VIDEO_DBG("desc_addr_phys: %#x\n", desc_addr_phys); ++ // put descriptor add to TBR and Fetch start ++ writel((u32)desc_addr_phys, addrTFEDTBR); ++ //wTFETiles = 1; ++ pAstRVAS->tfe_engine.finished = 0; ++ video_os_sleep_on_timeout(&pAstRVAS->tfe_engine.wait, ++ &pAstRVAS->tfe_engine.finished, TFE_TIMEOUT_IN_MS); ++ ++ if (!pAstRVAS->tfe_engine.finished) { ++ dev_err(pAstRVAS->pdev, "Video TFE failed\n"); ++ writel(0x00, addrTFERSTS); ++ pAstRVAS->tfe_engine.finished = 1; ++ bResult = false; ++ } ++ ++ writel((readl(addrTFECR) & (~0x3)), addrTFECR);// Disable IRQ and Turn off TFE when done ++ writel((readl(addrTFERSTS) | 0x2), addrTFERSTS); // clear status bit ++ *pdwRLESize = readl(pAstRVAS->fg_reg_base + TFE_RLE_Byte_Count); ++ *pdwCheckSum = readl(pAstRVAS->fg_reg_base + TFE_RLE_CheckSum); ++ ++ up(&pAstRVAS->tfe_engine.sem); ++ VIDEO_DBG("Done Busy: bResult: %d\n", bResult); ++ ++ return bResult; ++} ++ ++bool sleep_on_bse_busy(struct AstRVAS *pAstRVAS, phys_addr_t desc_addr_phys, ++ struct BSEAggregateRegister aBSEAR, u32 size) ++{ ++ void __iomem *addrBSEDTBR = pAstRVAS->fg_reg_base + BSE_Descriptor_Table_Base_Register; ++ void __iomem *addrBSCR = pAstRVAS->fg_reg_base + BSE_Command_Register; ++ void __iomem *addrBSDBS = pAstRVAS->fg_reg_base + BSE_Destination_Buket_Size_Resgister; ++ void __iomem *addrBSBPS0 = pAstRVAS->fg_reg_base + BSE_Bit_Position_Register_0; ++ void __iomem *addrBSBPS1 = pAstRVAS->fg_reg_base + BSE_Bit_Position_Register_1; ++ void __iomem *addrBSBPS2 = pAstRVAS->fg_reg_base + BSE_Bit_Position_Register_2; ++ void __iomem *addrBSESSTS = pAstRVAS->fg_reg_base + BSE_Status_Register; ++ u8 byCounter = 0; ++ bool bResult = true; ++ ++ down(&pAstRVAS->bse_engine.sem); ++ pAstRVAS->bse_engine.finished = 0; ++ ++ // Set BSE Temp buffer 
address, and clear lower u16 ++ writel(BSE_LMEM_Temp_Buffer_Offset << 16, addrBSCR); ++ writel(readl(addrBSCR) | (aBSEAR.dwBSCR & 0X00000FFF), addrBSCR); ++ writel(aBSEAR.dwBSDBS, addrBSDBS); ++ writel(aBSEAR.adwBSBPS[0], addrBSBPS0); ++ writel(aBSEAR.adwBSBPS[1], addrBSBPS1); ++ writel(aBSEAR.adwBSBPS[2], addrBSBPS2); ++ ++ writel((u32)desc_addr_phys, addrBSEDTBR); ++ ++ while (!pAstRVAS->bse_engine.finished) { ++ VIDEO_DBG("BSE Sleeping...\n"); ++ video_os_sleep_on_timeout(&pAstRVAS->bse_engine.wait, ++ &pAstRVAS->bse_engine.finished, ++ 1000); // loop if bse timedout ++ byCounter++; ++ VIDEO_DBG("Back from BSE Sleeping, finished: %u\n", ++ pAstRVAS->bse_engine.finished); ++ ++ if (byCounter == ENGINE_TIMEOUT_IN_SECONDS) { ++ writel(0x00, addrBSESSTS); ++ pAstRVAS->bse_engine.finished = 1; ++ dev_err(pAstRVAS->pdev, "TIMEOUT::Waiting BSE\n"); ++ bResult = false; ++ } ++ } ++ ++ VIDEO_DBG("*pdwBSESSTS = %#x\n", readl(addrBSESSTS)); ++ writel(readl(addrBSCR) & (~0x3), addrBSCR); ++ ++ up(&pAstRVAS->bse_engine.sem); ++ ++ return bResult; ++} ++ ++void sleep_on_ldma_busy(struct AstRVAS *pAstRVAS, phys_addr_t desc_addr_phys) ++{ ++ void __iomem *addrLDMADTBR = pAstRVAS->fg_reg_base + LDMA_Descriptor_Table_Base_Register; ++ void __iomem *addrLDMAControlR = pAstRVAS->fg_reg_base + LDMA_Control_Register; ++ ++ VIDEO_DBG("In sleepONldma busy\n"); ++ ++ down(&pAstRVAS->ldma_engine.sem); ++ ++ pAstRVAS->ldma_engine.finished = 0; ++ ++ writel(0x83, addrLDMAControlR);// descriptor can only in LMEM FOR LDMA ++ writel((u32)desc_addr_phys, addrLDMADTBR); ++ VIDEO_DBG("LDMA: control [%#x]\n", readl(addrLDMAControlR)); ++ VIDEO_DBG("LDMA: DTBR [%#x]\n", readl(addrLDMADTBR)); ++ ++ while (!pAstRVAS->ldma_engine.finished) ++ video_os_sleep_on_timeout(&pAstRVAS->ldma_engine.wait, (u8 *)&pAstRVAS->ldma_engine.finished, 1000); // loop if bse timedout ++ ++ VIDEO_DBG("LDMA wake up\n"); ++ writel(readl(addrLDMAControlR) & (~0x3), addrLDMAControlR); ++ up(&pAstRVAS->ldma_engine.sem); ++} ++ ++static int video_drv_get_resources(struct platform_device *pdev) ++{ ++ int result = 0; ++ struct resource *io_fg; ++ struct resource *io_grc; ++ struct resource *io_video; ++ struct AstRVAS *pAstRVAS = platform_get_drvdata(pdev); ++ ++ //get resources from platform ++ io_fg = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ VIDEO_DBG("io_fg: 0x%p\n", io_fg); ++ ++ if (!io_fg) { ++ dev_err(&pdev->dev, "No Frame Grabber IORESOURCE_MEM entry\n"); ++ return -ENOENT; ++ } ++ io_grc = platform_get_resource(pdev, IORESOURCE_MEM, 1); ++ VIDEO_DBG("io_grc: 0x%p\n", io_grc); ++ if (!io_grc) { ++ dev_err(&pdev->dev, "No GRCE IORESOURCE_MEM entry\n"); ++ return -ENOENT; ++ } ++ io_video = platform_get_resource(pdev, IORESOURCE_MEM, 2); ++ VIDEO_DBG("io_video: 0x%p\n", io_video); ++ if (!io_video) { ++ dev_err(&pdev->dev, "No video compression IORESOURCE_MEM entry\n"); ++ return -ENOENT; ++ } ++ ++ //map resource by device ++ pAstRVAS->fg_reg_base = devm_ioremap_resource(&pdev->dev, io_fg); ++ VIDEO_DBG("fg_reg_base: %p\n", pAstRVAS->fg_reg_base); ++ if (IS_ERR((void *)pAstRVAS->fg_reg_base)) { ++ result = PTR_ERR((void *)pAstRVAS->fg_reg_base); ++ dev_err(&pdev->dev, "Cannot map FG registers\n"); ++ pAstRVAS->fg_reg_base = 0; ++ return result; ++ } ++ pAstRVAS->grce_reg_base = devm_ioremap_resource(&pdev->dev, io_grc); ++ VIDEO_DBG("grce_reg_base: %p\n", pAstRVAS->grce_reg_base); ++ if (IS_ERR((void *)pAstRVAS->grce_reg_base)) { ++ result = PTR_ERR((void *)pAstRVAS->grce_reg_base); ++ dev_err(&pdev->dev, "Cannot map GRC 
registers\n"); ++ pAstRVAS->grce_reg_base = 0; ++ return result; ++ } ++ pAstRVAS->video_reg_base = devm_ioremap_resource(&pdev->dev, io_video); ++ VIDEO_DBG("video_reg_base: %p\n", pAstRVAS->video_reg_base); ++ if (IS_ERR((void *)pAstRVAS->video_reg_base)) { ++ result = PTR_ERR((void *)pAstRVAS->video_reg_base); ++ dev_err(&pdev->dev, "Cannot map video registers\n"); ++ pAstRVAS->video_reg_base = 0; ++ return result; ++ } ++ ++ pAstRVAS->config = of_device_get_match_data(&pdev->dev); ++ if (!pAstRVAS->config) ++ return -ENODEV; ++ return 0; ++} ++ ++static int video_drv_get_irqs(struct platform_device *pdev) ++{ ++ struct AstRVAS *pAstRVAS = platform_get_drvdata(pdev); ++ ++ pAstRVAS->irq_fge = platform_get_irq(pdev, 0); ++ VIDEO_DBG("irq_fge: %#x\n", pAstRVAS->irq_fge); ++ if (pAstRVAS->irq_fge < 0) { ++ dev_err(&pdev->dev, "NO FGE irq entry\n"); ++ return -ENOENT; ++ } ++ pAstRVAS->irq_vga = platform_get_irq(pdev, 1); ++ VIDEO_DBG("irq_vga: %#x\n", pAstRVAS->irq_vga); ++ if (pAstRVAS->irq_vga < 0) { ++ dev_err(&pdev->dev, "NO VGA irq entry\n"); ++ return -ENOENT; ++ } ++ pAstRVAS->irq_video = platform_get_irq(pdev, 2); ++ VIDEO_DBG("irq_video: %#x\n", pAstRVAS->irq_video); ++ if (pAstRVAS->irq_video < 0) { ++ dev_err(&pdev->dev, "NO video compression entry\n"); ++ return -ENOENT; ++ } ++ return 0; ++} ++ ++static int video_drv_get_clock(struct platform_device *pdev) ++{ ++ struct AstRVAS *pAstRVAS = platform_get_drvdata(pdev); ++ ++ pAstRVAS->eclk = devm_clk_get(&pdev->dev, "eclk"); ++ if (IS_ERR(pAstRVAS->eclk)) { ++ dev_err(&pdev->dev, "no eclk clock defined\n"); ++ return PTR_ERR(pAstRVAS->eclk); ++ } ++ ++ clk_prepare_enable(pAstRVAS->eclk); ++ ++ pAstRVAS->vclk = devm_clk_get(&pdev->dev, "vclk"); ++ if (IS_ERR(pAstRVAS->vclk)) { ++ dev_err(&pdev->dev, "no vclk clock defined\n"); ++ return PTR_ERR(pAstRVAS->vclk); ++ } ++ ++ clk_prepare_enable(pAstRVAS->vclk); ++ ++ if (pAstRVAS->config->version == 7) { ++ pAstRVAS->rvasclk = devm_clk_get(&pdev->dev, "rvasclk"); ++ if (IS_ERR(pAstRVAS->rvasclk)) { ++ pAstRVAS->rvasclk = devm_clk_get(&pdev->dev, "rvas2clk"); ++ if (IS_ERR(pAstRVAS->rvasclk)) { ++ dev_err(&pdev->dev, "no rvasclk or rvas2clk clock defined\n"); ++ return PTR_ERR(pAstRVAS->rvasclk); ++ } ++ } ++ } else { ++ pAstRVAS->rvasclk = devm_clk_get(&pdev->dev, "rvasclk-gate"); ++ if (IS_ERR(pAstRVAS->rvasclk)) { ++ dev_err(&pdev->dev, "no rvasclk clock defined\n"); ++ return PTR_ERR(pAstRVAS->rvasclk); ++ } ++ } ++ clk_prepare_enable(pAstRVAS->rvasclk); ++ return 0; ++} ++ ++static int video_drv_map_irqs(struct platform_device *pdev) ++{ ++ int result = 0; ++ struct AstRVAS *pAstRVAS = platform_get_drvdata(pdev); ++ //Map IRQS to handler ++ VIDEO_DBG("Requesting IRQs, irq_fge: %d, irq_vga: %d, irq_video: %d\n", ++ pAstRVAS->irq_fge, pAstRVAS->irq_vga, pAstRVAS->irq_video); ++ ++ result = devm_request_irq(&pdev->dev, pAstRVAS->irq_fge, fge_handler, 0, ++ dev_name(&pdev->dev), pAstRVAS); ++ if (result) { ++ pr_err("Error in requesting IRQ\n"); ++ pr_err("RVAS: Failed request FGE irq %d\n", pAstRVAS->irq_fge); ++ misc_deregister(&pAstRVAS->rvas_dev); ++ return result; ++ } ++ ++ result = devm_request_irq(&pdev->dev, pAstRVAS->irq_vga, fge_handler, 0, ++ dev_name(&pdev->dev), pAstRVAS); ++ if (result) { ++ pr_err("Error in requesting IRQ\n"); ++ pr_err("RVAS: Failed request vga irq %d\n", pAstRVAS->irq_vga); ++ misc_deregister(&pAstRVAS->rvas_dev); ++ return result; ++ } ++ ++ result = devm_request_irq(&pdev->dev, pAstRVAS->irq_video, ast_video_isr, 0, ++ dev_name(&pdev->dev), 
pAstRVAS); ++ if (result) { ++ pr_err("Error in requesting IRQ\n"); ++ pr_err("RVAS: Failed request video irq %d\n", pAstRVAS->irq_video); ++ misc_deregister(&pAstRVAS->rvas_dev); ++ return result; ++ } ++ ++ return result; ++} ++ ++// ++// ++// ++static int video_drv_probe(struct platform_device *pdev) ++{ ++ int result = 0; ++ struct AstRVAS *pAstRVAS; ++ struct regmap *sdram_scu; ++ struct device_node *dp_node; ++ struct device_node *edac_node; ++ void __iomem *mcr_base; ++ ++ pr_info("RVAS driver probe\n"); ++ pAstRVAS = devm_kzalloc(&pdev->dev, sizeof(struct AstRVAS), GFP_KERNEL); ++ VIDEO_DBG("pAstRVAS: 0x%llx\n", pAstRVAS); ++ ++ if (!pAstRVAS) { ++ dev_err(pAstRVAS->pdev, "Cannot allocate device structure\n"); ++ return -ENOMEM; ++ } ++ dev_set_drvdata(&pdev->dev, pAstRVAS); ++ pAstRVAS->pdev = (void *)&pdev->dev; ++ ++ // Get resources ++ result = video_drv_get_resources(pdev); ++ if (result < 0) { ++ dev_err(pAstRVAS->pdev, "video_probe: Error getting resources\n"); ++ return result; ++ } ++ ++ if (pAstRVAS->config->version == 7) ++ result = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); ++ else ++ result = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); ++ ++ if (result) { ++ dev_err(&pdev->dev, "Failed to set DMA mask\n"); ++ of_reserved_mem_device_release(&pdev->dev); ++ } ++ ++ //get irqs ++ result = video_drv_get_irqs(pdev); ++ if (result < 0) { ++ dev_err(pAstRVAS->pdev, "video_probe: Error getting irqs\n"); ++ return result; ++ } ++ ++ pAstRVAS->rvas_reset = devm_reset_control_get_by_index(&pdev->dev, 0); ++ if (IS_ERR(pAstRVAS->rvas_reset)) { ++ dev_err(&pdev->dev, "can't get rvas reset\n"); ++ return -ENOENT; ++ } ++ if (pAstRVAS->config->version == 7) ++ reset_control_deassert(pAstRVAS->rvas_reset); ++ pAstRVAS->video_engine_reset = devm_reset_control_get_shared_by_index(&pdev->dev, 1); ++ if (IS_ERR(pAstRVAS->video_engine_reset)) { ++ dev_err(&pdev->dev, "can't get video engine reset\n"); ++ return -ENOENT; ++ } ++ ++ //prepare video engine clock ++ result = video_drv_get_clock(pdev); ++ if (result < 0) { ++ dev_err(pAstRVAS->pdev, "video_probe: Error getting clocks\n"); ++ return result; ++ } ++ ++ dp_node = of_find_compatible_node(NULL, NULL, "aspeed,ast2600-displayport"); ++ if (!dp_node) { ++ dev_err(&pdev->dev, "cannot find dp node\n"); ++ } else { ++ pAstRVAS->dp_base = of_iomap(dp_node, 0); ++ if (!pAstRVAS->dp_base) ++ dev_err(&pdev->dev, "failed to iomem of display port\n"); ++ } ++ if (pAstRVAS->config->version == 7) { ++ pAstRVAS->FBInfo.dwDRAMSize = 0x40000000; // 1GB ++ // VGA size is fixed with 32MB ++ pAstRVAS->FBInfo.dwVGASize = 0x2000000; ++ } else { ++ edac_node = of_find_compatible_node(NULL, NULL, "aspeed,ast2600-sdram-edac"); ++ if (!edac_node) { ++ dev_err(&pdev->dev, "cannot find edac node\n"); ++ } else { ++ mcr_base = of_iomap(edac_node, 0); ++ if (!mcr_base) ++ dev_err(&pdev->dev, "failed to iomem of MCR\n"); ++ } ++ ++ set_FBInfo_size(pAstRVAS, mcr_base); ++ } ++ //scu ++ if (pAstRVAS->config->version == 7) { ++ sdram_scu = syscon_regmap_lookup_by_compatible("aspeed,ast2700-scu0"); ++ VIDEO_DBG("sdram_scu: 0x%llx\n", sdram_scu); ++ if (IS_ERR(sdram_scu)) { ++ dev_err(&pdev->dev, "failed to find ast2700-scu0 regmap\n"); ++ return PTR_ERR(sdram_scu); ++ } ++ pAstRVAS->scu = sdram_scu; ++ ++ sdram_scu = syscon_regmap_lookup_by_compatible("aspeed,ast2700-scu1"); ++ VIDEO_DBG("sdram_scu: 0x%llx\n", sdram_scu); ++ if (IS_ERR(sdram_scu)) { ++ dev_err(&pdev->dev, "failed to find ast2700-scu0 regmap\n"); ++ return 
PTR_ERR(sdram_scu); ++ } ++ pAstRVAS->scu_io = sdram_scu; ++ } else { ++ sdram_scu = syscon_regmap_lookup_by_compatible("aspeed,ast2600-scu"); ++ VIDEO_DBG("sdram_scu: 0x%llx\n", sdram_scu); ++ if (IS_ERR(sdram_scu)) { ++ dev_err(&pdev->dev, "failed to find ast2600-scu regmap\n"); ++ return PTR_ERR(sdram_scu); ++ } ++ pAstRVAS->scu = sdram_scu; ++ } ++ pAstRVAS->rvas_dev = video_misc; ++ if (pAstRVAS->config->version == 7) { ++ if (of_alias_get_id(pdev->dev.of_node, "rvas") == 1) { ++ pAstRVAS->rvas_index = 1; ++ pAstRVAS->rvas_dev.name = "rvas1"; ++ } else { ++ pAstRVAS->rvas_index = 0; ++ } ++ } ++ pAstRVAS->rvas_dev.parent = &pdev->dev; ++ result = misc_register(&pAstRVAS->rvas_dev); ++ if (result) { ++ pr_err("Failed in rvas misc device register (err: %d)\n", result); ++ return result; ++ } ++ pr_info("Video misc minor %d\n", pAstRVAS->rvas_dev.minor); ++ VIDEO_DBG("pdev: 0x%llx dev: 0x%llx pAstRVAS: 0x%llx rvas_dev: 0x%llx\n", pdev, ++ &pdev->dev, pAstRVAS, pAstRVAS->rvas_dev); ++ ++ if (sysfs_create_group(&pdev->dev.kobj, &rvas_attribute_group)) { ++ pr_err("Failed in creating group\n"); ++ return -1; ++ } ++ ++ VIDEO_DBG("Disabling interrupts...\n"); ++ disable_grce_tse_interrupt(pAstRVAS); ++ ++ //reserve memory ++ of_reserved_mem_device_init(&pdev->dev); ++ ++ // map irqs to irq_handlers ++ result = video_drv_map_irqs(pdev); ++ if (result < 0) { ++ dev_err(pAstRVAS->pdev, "video_probe: Error mapping irqs\n"); ++ return result; ++ } ++ VIDEO_DBG("After IRQ registration\n"); ++ ++ init_osr_es(pAstRVAS); ++ rvas_init(pAstRVAS); ++ video_engine_reserveMem(pAstRVAS); ++ video_on(pAstRVAS); ++ ++ pr_info("RVAS: driver successfully loaded.\n"); ++ return result; ++} ++ ++static void rvas_init(struct AstRVAS *pAstRVAS) ++{ ++ VIDEO_ENG_DBG("\n"); ++ ++ reset_snoop_engine(pAstRVAS); ++ update_video_geometry(pAstRVAS); ++ ++ set_snoop_engine(true, pAstRVAS); ++ enable_grce_tse_interrupt(pAstRVAS); ++} ++ ++static void video_engine_init(struct AstRVAS *pAstRVAS) ++{ ++ VIDEO_ENG_DBG("\n"); ++ // video engine ++ video_ctrl_init(pAstRVAS); ++ video_engine_rc4Reset(pAstRVAS); ++ set_direct_mode(pAstRVAS); ++ video_set_Window(pAstRVAS); ++ enable_video_interrupt(pAstRVAS); ++} ++ ++static int video_drv_remove(struct platform_device *pdev) ++{ ++ struct AstRVAS *pAstRVAS = NULL; ++ ++ VIDEO_DBG("\n"); ++ pAstRVAS = platform_get_drvdata(pdev); ++ video_off(pAstRVAS); ++ VIDEO_DBG("disable_grce_tse_interrupt...\n"); ++ disable_grce_tse_interrupt(pAstRVAS); ++ disable_video_interrupt(pAstRVAS); ++ ++ sysfs_remove_group(&pdev->dev.kobj, &rvas_attribute_group); ++ ++ VIDEO_DBG("misc_deregister...\n"); ++ misc_deregister(&pAstRVAS->rvas_dev); ++ ++ VIDEO_DBG("Releasing OSRes...\n"); ++ release_osr_es(pAstRVAS); ++ ++ free_video_engine_memory(pAstRVAS); ++ pr_info("RVAS: driver successfully unloaded.\n"); ++ return 0; ++} ++ ++static const u32 ast2400_dram_table[] = { ++ 0x04000000, //64MB ++ 0x08000000, //128MB ++ 0x10000000, //256MB ++ 0x20000000, //512MB ++}; ++ ++static const u32 ast2500_dram_table[] = { ++ 0x08000000, //128MB ++ 0x10000000, //256MB ++ 0x20000000, //512MB ++ 0x40000000, //1024MB ++}; ++ ++static const u32 ast2600_dram_table[] = { ++ 0x10000000, //256MB ++ 0x20000000, //512MB ++ 0x40000000, //1024MB ++ 0x80000000, //2048MB ++}; ++ ++static const u32 aspeed_vga_table[] = { ++ 0x800000, //8MB ++ 0x1000000, //16MB ++ 0x2000000, //32MB ++ 0x4000000, //64MB ++}; ++ ++static const struct aspeed_rvas_config ast2600_config = { ++ .version = 6, ++ .dram_table = ast2600_dram_table, ++}; ++ 
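
For reference, here is a minimal standalone sketch (not part of the patch) of the size decode that set_FBInfo_size() below performs on the AST2600 path: bits [1:0] of the MCR_CONF readout index the per-SoC DRAM table and bits [3:2] index aspeed_vga_table. The register value used here is purely hypothetical. Note that on AST2700 (config version 7) the probe path skips this decode entirely and hard-codes a 1 GB DRAM size and a 32 MB VGA aperture.

/* Hedged example: mirrors the set_FBInfo_size() lookup; sample value is hypothetical. */
#include <stdint.h>
#include <stdio.h>

static const uint32_t ast2600_dram_table[] = {
	0x10000000, /* 256MB */
	0x20000000, /* 512MB */
	0x40000000, /* 1024MB */
	0x80000000, /* 2048MB */
};

static const uint32_t aspeed_vga_table[] = {
	0x800000,  /* 8MB */
	0x1000000, /* 16MB */
	0x2000000, /* 32MB */
	0x4000000, /* 64MB */
};

int main(void)
{
	uint32_t reg_mcr004 = 0x6; /* hypothetical MCR_CONF readout */
	uint32_t dram = ast2600_dram_table[reg_mcr004 & 0x3];     /* bits [1:0] */
	uint32_t vga = aspeed_vga_table[(reg_mcr004 & 0xC) >> 2]; /* bits [3:2] */

	/* 0x6 -> DRAM index 2 (1024MB), VGA index 1 (16MB) */
	printf("DRAM: %u MB, VGA: %u MB\n",
	       (unsigned int)(dram >> 20), (unsigned int)(vga >> 20));
	return 0;
}
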
++static const struct aspeed_rvas_config ast2700_config = { ++ .version = 7, ++ .dram_table = ast2600_dram_table, ++}; ++ ++static void set_FBInfo_size(struct AstRVAS *pAstRVAS, void __iomem *mcr_base) ++{ ++ u32 reg_mcr004 = readl(mcr_base + MCR_CONF); ++ ++ pAstRVAS->FBInfo.dwDRAMSize = pAstRVAS->config->dram_table[reg_mcr004 & 0x3]; ++ ++ pAstRVAS->FBInfo.dwVGASize = aspeed_vga_table[((reg_mcr004 & 0xC) >> 2)]; ++} ++ ++static const struct of_device_id ast_rvas_match[] = { ++ { .compatible = "aspeed,ast2700-rvas", .data = &ast2700_config }, ++ { .compatible = "aspeed,ast2600-rvas", .data = &ast2600_config }, ++ { }, ++}; ++ ++MODULE_DEVICE_TABLE(of, ast_rvas_match); ++ ++static struct platform_driver video_driver = { ++ .probe = video_drv_probe, ++ .remove = video_drv_remove, ++ .driver = { .of_match_table = of_match_ptr(ast_rvas_match), .name = ++ RVAS_DRIVER_NAME, .owner = THIS_MODULE, }, }; ++ ++module_platform_driver(video_driver); ++ ++MODULE_AUTHOR("ASPEED Technology"); ++MODULE_DESCRIPTION("RVAS video driver module for AST2600"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig +--- a/drivers/spi/Kconfig 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/spi/Kconfig 2025-12-23 10:16:09.442228501 +0000 +@@ -129,6 +129,16 @@ + controller (SPI) for the host firmware. The implementation + only supports SPI NOR. + ++config SPI_ASPEED_TXRX ++ tristate "Aspeed SPI controllers" ++ depends on ARCH_ASPEED || COMPILE_TEST ++ depends on OF ++ help ++ This enables support for SPI controller driver in the Aspeed ++ AST2600, AST2500 and AST2400 SoCs when attached ++ to SPI device chips with half-duplex mode. On AST2700, ++ full-duplex mode can be enabled. ++ + config SPI_ATMEL + tristate "Atmel SPI Controller" + depends on ARCH_AT91 || COMPILE_TEST +diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile +--- a/drivers/spi/Makefile 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/spi/Makefile 2025-12-23 10:16:13.600158773 +0000 +@@ -22,6 +22,7 @@ + obj-$(CONFIG_SPI_AR934X) += spi-ar934x.o + obj-$(CONFIG_SPI_ARMADA_3700) += spi-armada-3700.o + obj-$(CONFIG_SPI_ASPEED_SMC) += spi-aspeed-smc.o ++obj-$(CONFIG_SPI_ASPEED_TXRX) += spi-aspeed-txrx.o + obj-$(CONFIG_SPI_ATMEL) += spi-atmel.o + obj-$(CONFIG_SPI_ATMEL_QUADSPI) += atmel-quadspi.o + obj-$(CONFIG_SPI_AT91_USART) += spi-at91-usart.o +diff --git a/drivers/spi/spi-aspeed-smc.c b/drivers/spi/spi-aspeed-smc.c +--- a/drivers/spi/spi-aspeed-smc.c 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/spi/spi-aspeed-smc.c 2025-12-23 10:16:21.071033558 +0000 +@@ -7,10 +7,16 @@ + */ + + #include ++#include ++#include ++#include ++#include + #include ++#include + #include + #include + #include ++#include + #include + #include + +@@ -23,12 +29,21 @@ + /* CE Control Register */ + #define CE_CTRL_REG 0x4 + ++#define INTR_CTRL_STATUS_REG 0x08 ++#define SPI_DMA_STATUS BIT(11) ++#define SPI_DMA_IRQ_STS BIT(19) ++ ++#define CMD_CTRL_REG 0xc ++ + /* CEx Control Register */ + #define CE0_CTRL_REG 0x10 + #define CTRL_IO_MODE_MASK GENMASK(30, 28) + #define CTRL_IO_SINGLE_DATA 0x0 + #define CTRL_IO_DUAL_DATA BIT(29) ++#define CTRL_IO_DUAL_ADDR_DATA GENMASK(29, 28) + #define CTRL_IO_QUAD_DATA BIT(30) ++#define CTRL_IO_QUAD_ADDR_DATA (BIT(30) | BIT(28)) ++#define CTRL_IO_QUAD_IO BIT(31) + #define CTRL_COMMAND_SHIFT 16 + #define CTRL_IO_ADDRESS_4B BIT(13) /* AST2400 SPI only */ + #define CTRL_IO_DUMMY_SET(dummy) \ +@@ -42,14 +57,43 @@ + #define CTRL_IO_MODE_WRITE 0x2 + #define CTRL_IO_MODE_USER 0x3 + +-#define CTRL_IO_CMD_MASK 
0xf0ff40c3 ++#define CTRL_IO_CMD_MASK 0xf0ff40c7 + + /* CEx Address Decoding Range Register */ + #define CE0_SEGMENT_ADDR_REG 0x30 + ++#define MISC_CTRL_REG 0x54 ++#define SPI_USER_CMD_MODE BIT(27) ++#define SPI_CS_TO_DIS BIT(26) ++#define SPI_UNALGNED_ACCESS BIT(24) ++#define SPI_CS_CONTINUOUS BIT(16) ++#define DUMMY_OUTPUT_DATA GENMASK(7, 0) ++ ++#define HOST_DIRECT_ACCESS_CMD_CTRL4 0x6c ++#define HOST_DIRECT_ACCESS_CMD_CTRL2 0x74 ++ ++#define DMA_HI_ADDR_REG 0x7c ++#define DMA_DRAM_HI_ADDR (0x4) ++ ++#define DMA_CTRL_REG 0x80 ++#define SPI_DMA_ENABLE BIT(0) ++#define SPI_DMA_IRQ_EN BIT(3) ++#define SPI_DAM_GRANT BIT(30) ++#define SPI_DAM_REQUEST BIT(31) ++#define DMA_GET_REQ_MAGIC 0xaeed0000 ++#define DMA_DISCARD_REQ_MAGIC 0xdeea0000 ++ ++#define DMA_FLASH_ADDR_REG 0x84 ++#define DMA_RAM_ADDR_REG 0x88 ++#define DMA_LEN_REG 0x8c ++ + /* CEx Read timing compensation register */ + #define CE0_TIMING_COMPENSATION_REG 0x94 + ++#define ASPEED_SPI_OP_BUF_LEN 0x10000 ++ ++static spinlock_t g_lock; ++ + enum aspeed_spi_ctl_reg_value { + ASPEED_SPI_BASE, + ASPEED_SPI_READ, +@@ -57,6 +101,13 @@ + ASPEED_SPI_MAX, + }; + ++enum aspeed_spi_op_field { ++ SPI_OP_CMD = 1, ++ SPI_OP_ADDR, ++ SPI_OP_DATA, ++ SPI_OP_ALL, ++}; ++ + struct aspeed_spi; + + struct aspeed_spi_chip { +@@ -64,7 +115,8 @@ + u32 cs; + void __iomem *ctl; + void __iomem *ahb_base; +- u32 ahb_window_size; ++ phys_addr_t ahb_base_phy; ++ size_t ahb_window_sz; + u32 ctl_val[ASPEED_SPI_MAX]; + u32 clk_freq; + }; +@@ -78,43 +130,78 @@ + u32 timing; + u32 hclk_mask; + u32 hdiv_max; ++ size_t min_window_sz; ++ u32 ver; + +- u32 (*segment_start)(struct aspeed_spi *aspi, u32 reg); +- u32 (*segment_end)(struct aspeed_spi *aspi, u32 reg); +- u32 (*segment_reg)(struct aspeed_spi *aspi, u32 start, u32 end); ++ int (*adjust_window)(struct aspeed_spi *aspi); ++ u64 (*segment_start)(struct aspeed_spi *aspi, u32 reg); ++ u64 (*segment_end)(struct aspeed_spi *aspi, u32 reg); ++ u32 (*segment_reg)(struct aspeed_spi *aspi, u64 start, u64 end); ++ u32 (*get_clk_div)(struct aspeed_spi_chip *chip, u32 hz); + int (*calibrate)(struct aspeed_spi_chip *chip, u32 hdiv, + const u8 *golden_buf, u8 *test_buf); ++ void (*safs_init)(struct aspeed_spi *aspi, struct spi_mem_op *op); ++ void (*safs_start)(struct aspeed_spi *aspi); ++ void (*safs_stop)(struct aspeed_spi *aspi); + }; + + #define ASPEED_SPI_MAX_NUM_CS 5 + ++#define ASPEED_SPI_NORMAL_MODE 0x00000001 ++#define ASPEED_SPI_DMA_WRITE_MODE 0x00000002 ++#define ASPEED_SPI_FIXED_LOW_W_CLK 0x00000004 ++#define ASPEED_SPI_MIN_WINDOW 0x00000008 ++#define ASPEED_SPI_DMA_MODE 0x00000010 ++#define ASPEED_SPI_PURE_USER_MODE 0x00000020 ++#define ASPEED_SPI_TIMING_CLB_DISABLED 0x00000040 ++#define ASPEED_SPI_LTPI_SUPPORT 0x00000080 ++#define ASPEED_SPI_QUAD_ADDR_SUPPORT 0x00000100 ++ + struct aspeed_spi { + const struct aspeed_spi_data *data; + + void __iomem *regs; +- void __iomem *ahb_base; +- u32 ahb_base_phy; +- u32 ahb_window_size; ++ phys_addr_t ahb_base_phy; ++ u64 ltpi_base_phy; ++ size_t ahb_window_sz; ++ u32 num_cs; + struct device *dev; + + struct clk *clk; + u32 clk_freq; + + struct aspeed_spi_chip chips[ASPEED_SPI_MAX_NUM_CS]; ++ ++ int irq; ++ struct completion dma_done; ++ dma_addr_t dma_addr_phy; ++ void __iomem *op_buf; ++ u32 flag; + }; + +-static u32 aspeed_spi_get_io_mode(const struct spi_mem_op *op) ++static u32 aspeed_spi_get_io_mode(const struct spi_mem_op *op, ++ enum aspeed_spi_op_field field) + { +- switch (op->data.buswidth) { +- case 1: +- return CTRL_IO_SINGLE_DATA; +- case 2: +- return 
CTRL_IO_DUAL_DATA; +- case 4: +- return CTRL_IO_QUAD_DATA; +- default: +- return CTRL_IO_SINGLE_DATA; ++ if (field == SPI_OP_ALL || field == SPI_OP_CMD) { ++ if (op->cmd.buswidth == 4) ++ return CTRL_IO_QUAD_IO; + } ++ ++ if (field == SPI_OP_ALL || field == SPI_OP_ADDR) { ++ if (op->addr.buswidth == 4) ++ return CTRL_IO_QUAD_ADDR_DATA; ++ else if (op->addr.buswidth == 2) ++ return CTRL_IO_DUAL_ADDR_DATA; ++ } ++ ++ if (field == SPI_OP_ALL || field == SPI_OP_DATA) { ++ if (op->data.buswidth == 4) ++ return CTRL_IO_QUAD_DATA; ++ else if (op->data.buswidth == 2) ++ return CTRL_IO_DUAL_DATA; ++ } ++ ++ return CTRL_IO_SINGLE_DATA; + } + + static void aspeed_spi_set_io_mode(struct aspeed_spi_chip *chip, u32 io_mode) +@@ -137,11 +224,12 @@ + + ctl &= ~CTRL_CE_STOP_ACTIVE; + writel(ctl, chip->ctl); ++ readl(chip->ctl); + } + + static void aspeed_spi_stop_user(struct aspeed_spi_chip *chip) + { +- u32 ctl = chip->ctl_val[ASPEED_SPI_READ] | ++ u32 ctl = chip->ctl_val[ASPEED_SPI_BASE] | + CTRL_IO_MODE_USER | CTRL_CE_STOP_ACTIVE; + + writel(ctl, chip->ctl); +@@ -178,8 +266,13 @@ + return 0; + } + +-static int aspeed_spi_send_cmd_addr(struct aspeed_spi_chip *chip, u8 addr_nbytes, +- u64 offset, u32 opcode) ++static void aspeed_spi_send_cmd(struct aspeed_spi_chip *chip, u8 opcode) ++{ ++ aspeed_spi_write_to_ahb(chip->ahb_base, &opcode, 1); ++} ++ ++static int aspeed_spi_send_addr(struct aspeed_spi_chip *chip, u8 addr_nbytes, ++ u64 offset) + { + __be32 temp; + u32 cmdaddr; +@@ -187,20 +280,18 @@ + switch (addr_nbytes) { + case 3: + cmdaddr = offset & 0xFFFFFF; +- cmdaddr |= opcode << 24; +- +- temp = cpu_to_be32(cmdaddr); +- aspeed_spi_write_to_ahb(chip->ahb_base, &temp, 4); ++ temp = cpu_to_be32(cmdaddr) >> 8; ++ aspeed_spi_write_to_ahb(chip->ahb_base, &temp, 3); + break; + case 4: + temp = cpu_to_be32(offset); +- aspeed_spi_write_to_ahb(chip->ahb_base, &opcode, 1); + aspeed_spi_write_to_ahb(chip->ahb_base, &temp, 4); + break; + default: + WARN_ONCE(1, "Unexpected address width %u", addr_nbytes); + return -EOPNOTSUPP; + } ++ + return 0; + } + +@@ -230,27 +321,36 @@ + const struct spi_mem_op *op, + u64 offset, size_t len, void *buf) + { +- int io_mode = aspeed_spi_get_io_mode(op); ++ int io_mode; + u8 dummy = 0xFF; + int i; + int ret; + + aspeed_spi_start_user(chip); + +- ret = aspeed_spi_send_cmd_addr(chip, op->addr.nbytes, offset, op->cmd.opcode); ++ io_mode = aspeed_spi_get_io_mode(op, SPI_OP_CMD); ++ aspeed_spi_set_io_mode(chip, io_mode); ++ aspeed_spi_send_cmd(chip, op->cmd.opcode); ++ ++ io_mode = aspeed_spi_get_io_mode(op, SPI_OP_ADDR); ++ aspeed_spi_set_io_mode(chip, io_mode); ++ ret = aspeed_spi_send_addr(chip, op->addr.nbytes, op->addr.val); + if (ret < 0) + goto stop_user; + + if (op->dummy.buswidth && op->dummy.nbytes) { +- for (i = 0; i < op->dummy.nbytes / op->dummy.buswidth; i++) +- aspeed_spi_write_to_ahb(chip->ahb_base, &dummy, sizeof(dummy)); ++ for (i = 0; i < op->dummy.nbytes; i++) ++ aspeed_spi_write_to_ahb(chip->ahb_base, ++ &dummy, sizeof(dummy)); + } + ++ io_mode = aspeed_spi_get_io_mode(op, SPI_OP_DATA); + aspeed_spi_set_io_mode(chip, io_mode); +- + aspeed_spi_read_from_ahb(buf, chip->ahb_base, len); ++ + stop_user: + aspeed_spi_stop_user(chip); ++ + return ret; + } + +@@ -258,32 +358,51 @@ + const struct spi_mem_op *op) + { + int ret; ++ int io_mode; + + aspeed_spi_start_user(chip); +- ret = aspeed_spi_send_cmd_addr(chip, op->addr.nbytes, op->addr.val, op->cmd.opcode); ++ ++ io_mode = aspeed_spi_get_io_mode(op, SPI_OP_CMD); ++ aspeed_spi_set_io_mode(chip, io_mode); ++ 
aspeed_spi_send_cmd(chip, op->cmd.opcode); ++ ++ io_mode = aspeed_spi_get_io_mode(op, SPI_OP_ADDR); ++ aspeed_spi_set_io_mode(chip, io_mode); ++ ret = aspeed_spi_send_addr(chip, op->addr.nbytes, op->addr.val); + if (ret < 0) + goto stop_user; ++ ++ io_mode = aspeed_spi_get_io_mode(op, SPI_OP_DATA); ++ aspeed_spi_set_io_mode(chip, io_mode); + aspeed_spi_write_to_ahb(chip->ahb_base, op->data.buf.out, op->data.nbytes); ++ + stop_user: + aspeed_spi_stop_user(chip); ++ + return ret; + } + + /* support for 1-1-1, 1-1-2 or 1-1-4 */ + static bool aspeed_spi_supports_op(struct spi_mem *mem, const struct spi_mem_op *op) + { ++ struct aspeed_spi *aspi = spi_controller_get_devdata(mem->spi->controller); ++ + if (op->cmd.buswidth > 1) + return false; + + if (op->addr.nbytes != 0) { +- if (op->addr.buswidth > 1) ++ if (op->addr.buswidth > 1 && ++ !(aspi->flag & ASPEED_SPI_QUAD_ADDR_SUPPORT)) + return false; + if (op->addr.nbytes < 3 || op->addr.nbytes > 4) + return false; + } + + if (op->dummy.nbytes != 0) { +- if (op->dummy.buswidth > 1 || op->dummy.nbytes > 7) ++ if (op->dummy.buswidth > 1 && ++ !(aspi->flag & ASPEED_SPI_QUAD_ADDR_SUPPORT)) ++ return false; ++ if (op->dummy.nbytes > 7) + return false; + } + +@@ -294,13 +413,12 @@ + } + + static const struct aspeed_spi_data ast2400_spi_data; ++static const struct aspeed_spi_data ast2500_fmc_data; + + static int do_aspeed_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) + { + struct aspeed_spi *aspi = spi_controller_get_devdata(mem->spi->controller); + struct aspeed_spi_chip *chip = &aspi->chips[spi_get_chipselect(mem->spi, 0)]; +- u32 addr_mode, addr_mode_backup; +- u32 ctl_val; + int ret = 0; + + dev_dbg(aspi->dev, +@@ -310,39 +428,14 @@ + op->dummy.buswidth, op->data.buswidth, + op->addr.nbytes, op->dummy.nbytes, op->data.nbytes); + +- addr_mode = readl(aspi->regs + CE_CTRL_REG); +- addr_mode_backup = addr_mode; +- +- ctl_val = chip->ctl_val[ASPEED_SPI_BASE]; +- ctl_val &= ~CTRL_IO_CMD_MASK; +- +- ctl_val |= op->cmd.opcode << CTRL_COMMAND_SHIFT; +- +- /* 4BYTE address mode */ +- if (op->addr.nbytes) { +- if (op->addr.nbytes == 4) +- addr_mode |= (0x11 << chip->cs); +- else +- addr_mode &= ~(0x11 << chip->cs); +- +- if (op->addr.nbytes == 4 && chip->aspi->data == &ast2400_spi_data) +- ctl_val |= CTRL_IO_ADDRESS_4B; +- } +- +- if (op->dummy.nbytes) +- ctl_val |= CTRL_IO_DUMMY_SET(op->dummy.nbytes / op->dummy.buswidth); +- +- if (op->data.nbytes) +- ctl_val |= aspeed_spi_get_io_mode(op); +- +- if (op->data.dir == SPI_MEM_DATA_OUT) +- ctl_val |= CTRL_IO_MODE_WRITE; +- else +- ctl_val |= CTRL_IO_MODE_READ; ++ /* no operation for AST2500 when SW reset is executed */ ++ if (aspi->data == &ast2500_fmc_data && ++ (op->cmd.opcode == SPINOR_OP_SRSTEN || ++ op->cmd.opcode == SPINOR_OP_SRST)) ++ return ret; + +- if (addr_mode != addr_mode_backup) +- writel(addr_mode, aspi->regs + CE_CTRL_REG); +- writel(ctl_val, chip->ctl); ++ if (aspi->data->safs_stop) ++ aspi->data->safs_stop(aspi); + + if (op->data.dir == SPI_MEM_DATA_IN) { + if (!op->addr.nbytes) +@@ -357,13 +450,524 @@ + ret = aspeed_spi_write_user(chip, op); + } + ++ if (aspi->data->safs_start) ++ aspi->data->safs_start(aspi); ++ + /* Restore defaults */ +- if (addr_mode != addr_mode_backup) +- writel(addr_mode_backup, aspi->regs + CE_CTRL_REG); + writel(chip->ctl_val[ASPEED_SPI_READ], chip->ctl); + return ret; + } + ++/* ++ * If the slave device is SPI NOR flash, there are two types ++ * of command mode for ASPEED SPI memory controller used to ++ * transfer data. 
The first one is user mode and the other is ++ * normal read/write mode. With user mode, SPI NOR flash ++ * command, address and data processes are all handled by CPU. ++ * With normal read/write mode, we can easily read/write data ++ * to flash by reading or writing related remapped address, ++ * then, SPI NOR flash command and address will be transferred ++ * to flash by controller automatically. Besides, ASPEED SPI ++ * memory controller can also block address or data bytes by ++ * configure FMC0C/SPIR0C address and data mask register in ++ * order to satisfy the following SPI flash operation sequences: ++ * (command) only, (command and address) only or ++ * (coommand and data) only. ++ */ ++static int aspeed_spi_exec_op_normal_mode(struct spi_mem *mem, ++ const struct spi_mem_op *op) ++{ ++ struct aspeed_spi *aspi = spi_controller_get_devdata(mem->spi->controller); ++ struct device *dev = aspi->dev; ++ struct aspeed_spi_chip *chip = &aspi->chips[spi_get_chipselect(mem->spi, 0)]; ++ u32 cs = spi_get_chipselect(mem->spi, 0); ++ u32 ctrl_val; ++ u32 addr_mode_reg, addr_mode_reg_backup; ++ u32 addr_data_mask = 0; ++ void __iomem *op_addr; ++ const void *data_buf; ++ u32 data_byte = 0; ++ u32 dummy_data = 0; ++ unsigned long flags; ++ ++ dev_dbg(dev, "cs:%d, cmd:%x(%d),addr:%llx(%d),dummy:%d(%d),data_len:%x(%d)\n", ++ cs, op->cmd.opcode, op->cmd.buswidth, op->addr.val, ++ op->addr.buswidth, op->dummy.nbytes, op->dummy.buswidth, ++ op->data.nbytes, op->data.buswidth); ++ ++ addr_mode_reg = readl(aspi->regs + CE_CTRL_REG); ++ addr_mode_reg_backup = addr_mode_reg; ++ addr_data_mask = readl(aspi->regs + CMD_CTRL_REG); ++ ++ ctrl_val = chip->ctl_val[ASPEED_SPI_BASE]; ++ ctrl_val &= ~CTRL_IO_CMD_MASK; ++ ctrl_val |= aspeed_spi_get_io_mode(op, SPI_OP_ALL); ++ ++ /* configure opcode */ ++ ctrl_val |= op->cmd.opcode << 16; ++ ++ /* configure operation address, address length and address mask */ ++ if (op->addr.nbytes != 0) { ++ if (op->addr.nbytes == 3) ++ addr_mode_reg &= ~(0x11 << cs); ++ else ++ addr_mode_reg |= (0x11 << cs); ++ ++ addr_data_mask &= 0x0f; ++ op_addr = chip->ahb_base + op->addr.val; ++ } else { ++ addr_data_mask |= 0xf0; ++ op_addr = chip->ahb_base; ++ } ++ ++ if (op->dummy.nbytes != 0) { ++ ctrl_val |= ((op->dummy.nbytes & 0x3) << 6 | ++ ((op->dummy.nbytes & 0x4) >> 2) << 14); ++ } ++ ++ /* configure data io mode and data mask */ ++ if (op->data.nbytes != 0) { ++ addr_data_mask &= 0xF0; ++ data_byte = op->data.nbytes; ++ if (op->data.dir == SPI_MEM_DATA_OUT) { ++ if (data_byte % 4 != 0) { ++ memset(aspi->op_buf, 0xff, ++ (((data_byte + 3) / 4) * 4)); ++ memcpy(aspi->op_buf, op->data.buf.out, data_byte); ++ data_buf = aspi->op_buf; ++ data_byte = (((data_byte + 3) / 4) * 4); ++ } else { ++ data_buf = op->data.buf.out; ++ } ++ } else { ++ data_buf = op->data.buf.in; ++ } ++ } else { ++ addr_data_mask |= 0x0f; ++ data_byte = 1; ++ data_buf = &dummy_data; ++ } ++ ++ /* configure command mode */ ++ if (op->data.dir == SPI_MEM_DATA_OUT) ++ ctrl_val |= CTRL_IO_MODE_WRITE; ++ else ++ ctrl_val |= CTRL_IO_MODE_READ; ++ ++ /* set controller registers */ ++ writel(ctrl_val, chip->ctl); ++ writel(addr_mode_reg, aspi->regs + CE_CTRL_REG); ++ writel(addr_data_mask, aspi->regs + CMD_CTRL_REG); ++ ++ dev_dbg(dev, "ctrl: 0x%08x, addr_mode: 0x%x, mask: 0x%x, addr:0x%p\n", ++ ctrl_val, addr_mode_reg, addr_data_mask, op_addr); ++ ++ /* trigger spi transmission or reception sequence */ ++ spin_lock_irqsave(&g_lock, flags); ++ ++ if (op->data.dir == SPI_MEM_DATA_OUT) ++ memcpy_toio(op_addr, data_buf, 
data_byte); ++ else ++ memcpy_fromio((void *)data_buf, op_addr, data_byte); ++ ++ spin_unlock_irqrestore(&g_lock, flags); ++ ++ /* restore controller setting */ ++ writel(chip->ctl_val[ASPEED_SPI_READ], chip->ctl); ++ writel(addr_mode_reg_backup, aspi->regs + CE_CTRL_REG); ++ writel(0x0, aspi->regs + CMD_CTRL_REG); ++ ++ return 0; ++} ++ ++#define MAX_READ_SZ_ONCE 0x3000 /* 12KB */ ++ ++/* ++ * When DMA memory mode is enabled, there is a limitation for AST2600, ++ * both DMA source and destination address should be 4-byte aligned. ++ * Thus, a 4-byte aligned buffer should be allocated previously and ++ * CPU needs to copy data from it after DMA done. ++ */ ++static ssize_t aspeed_2600_spi_dirmap_dma_read(struct spi_mem_dirmap_desc *desc, ++ u64 offs, size_t len, void *buf) ++{ ++ int ret = 0; ++ u32 timeout = 0; ++ struct aspeed_spi *aspi = spi_controller_get_devdata(desc->mem->spi->controller); ++ struct device *dev = aspi->dev; ++ struct aspeed_spi_chip *chip = &aspi->chips[spi_get_chipselect(desc->mem->spi, 0)]; ++ struct spi_mem_op op_tmpl = desc->info.op_tmpl; ++ u32 reg_val; ++ u32 extra; ++ u32 tb_read_len = len; ++ u32 read_len; ++ u32 buf_offs = 0; ++ u32 flash_offs = (u32)offs; ++ ++ if (chip->ahb_window_sz < offs + len) { ++ dev_err(dev, "read range exceeds flash remapping size\n"); ++ return 0; ++ } ++ ++ dev_dbg(dev, "read op:0x%x, addr:0x%llx, len:0x%zx\n", ++ op_tmpl.cmd.opcode, offs, len); ++ ++ while (tb_read_len > 0) { ++ /* read max 10KB bytes once */ ++ read_len = MAX_READ_SZ_ONCE - (flash_offs % MAX_READ_SZ_ONCE); ++ if (tb_read_len < read_len) ++ read_len = tb_read_len; ++ ++ /* For AST2600 SPI DMA, flash offset should be 4 byte aligned */ ++ extra = flash_offs % 4; ++ if (extra != 0) { ++ flash_offs = (flash_offs / 4) * 4; ++ read_len += extra; ++ } ++ ++ writel(DMA_GET_REQ_MAGIC, aspi->regs + DMA_CTRL_REG); ++ if (readl(aspi->regs + DMA_CTRL_REG) & SPI_DAM_REQUEST) { ++ while (!(readl(aspi->regs + DMA_CTRL_REG) & ++ SPI_DAM_GRANT)) ++ ; ++ } ++ ++ writel(chip->ctl_val[ASPEED_SPI_READ], chip->ctl); ++ ++ /* ++ * don't use dma_map_single here, since we cannot make sure the buf's ++ * start address is 4-byte-aligned. ++ */ ++ writel(0x0, aspi->regs + DMA_CTRL_REG); ++ writel(aspi->dma_addr_phy, aspi->regs + DMA_RAM_ADDR_REG); ++ writel(chip->ahb_base_phy + flash_offs, aspi->regs + DMA_FLASH_ADDR_REG); ++ writel(read_len - 1, aspi->regs + DMA_LEN_REG); ++ ++ /* enable DMA irq */ ++ reg_val = readl(aspi->regs + INTR_CTRL_STATUS_REG); ++ reg_val |= SPI_DMA_IRQ_EN; ++ writel(reg_val, aspi->regs + INTR_CTRL_STATUS_REG); ++ ++ reinit_completion(&aspi->dma_done); ++ ++ /* enable read DMA */ ++ writel(0x1, aspi->regs + DMA_CTRL_REG); ++ timeout = wait_for_completion_timeout(&aspi->dma_done, ++ msecs_to_jiffies(2000)); ++ if (timeout == 0) { ++ writel(0x0, aspi->regs + DMA_CTRL_REG); ++ writel(DMA_DISCARD_REQ_MAGIC, aspi->regs + DMA_CTRL_REG); ++ dev_err(dev, "read data timeout %d\n", ret); ++ ret = -1; ++ goto end; ++ } else { ++ memcpy(buf + buf_offs, aspi->op_buf + extra, read_len - extra); ++ } ++ ++ read_len -= extra; ++ ++ buf_offs += read_len; ++ flash_offs += read_len; ++ tb_read_len -= read_len; ++ } ++ ++end: ++ return ret ? 
0 : len; ++} ++ ++static ssize_t aspeed_2600_spi_dirmap_dma_write(struct spi_mem_dirmap_desc *desc, ++ u64 offs, size_t len, ++ const void *buf) ++{ ++ int ret = 0; ++ struct aspeed_spi *aspi = spi_controller_get_devdata(desc->mem->spi->controller); ++ struct device *dev = aspi->dev; ++ struct aspeed_spi_chip *chip = &aspi->chips[spi_get_chipselect(desc->mem->spi, 0)]; ++ u32 timeout = 0; ++ u32 reg_val; ++ struct spi_mem_op op_tmpl = desc->info.op_tmpl; ++ ++ if (chip->ahb_window_sz < offs + len) { ++ dev_info(dev, "write range exceeds flash remapping size\n"); ++ return 0; ++ } ++ ++ if (len < 1) ++ return 0; ++ ++ if (len > ASPEED_SPI_OP_BUF_LEN) { ++ dev_info(dev, ++ "written length exceeds expected value (0x%zx)\n", ++ len); ++ return 0; ++ } ++ ++ dev_dbg(dev, "write op:0x%x, addr:0x%llx, len:0x%zx\n", ++ op_tmpl.cmd.opcode, offs, len); ++ ++ writel(DMA_GET_REQ_MAGIC, aspi->regs + DMA_CTRL_REG); ++ if (readl(aspi->regs + DMA_CTRL_REG) & SPI_DAM_REQUEST) { ++ while (!(readl(aspi->regs + DMA_CTRL_REG) & ++ SPI_DAM_GRANT)) ++ ; ++ } ++ ++ writel(chip->ctl_val[ASPEED_SPI_WRITE], chip->ctl); ++ ++ /* ++ * don't use dma_map_single here, since we cannot make sure the buf's ++ * start address is 4-byte-aligned. ++ */ ++ memcpy(aspi->op_buf, buf, len); ++ ++ writel(0x0, aspi->regs + DMA_CTRL_REG); ++ writel(aspi->dma_addr_phy, aspi->regs + DMA_RAM_ADDR_REG); ++ writel(chip->ahb_base_phy + offs, aspi->regs + DMA_FLASH_ADDR_REG); ++ writel(len - 1, aspi->regs + DMA_LEN_REG); ++ ++ /* enable DMA irq */ ++ reg_val = readl(aspi->regs + INTR_CTRL_STATUS_REG); ++ reg_val |= SPI_DMA_IRQ_EN; ++ writel(reg_val, aspi->regs + INTR_CTRL_STATUS_REG); ++ ++ reinit_completion(&aspi->dma_done); ++ ++ /* enable write DMA */ ++ writel(0x3, aspi->regs + DMA_CTRL_REG); ++ timeout = wait_for_completion_timeout(&aspi->dma_done, msecs_to_jiffies(2000)); ++ if (timeout == 0) { ++ writel(0x0, aspi->regs + DMA_CTRL_REG); ++ writel(DMA_DISCARD_REQ_MAGIC, aspi->regs + DMA_CTRL_REG); ++ dev_err(dev, "write data timeout %d\n", ret); ++ ret = -1; ++ } ++ ++ writel(chip->ctl_val[ASPEED_SPI_READ], chip->ctl); ++ ++ return ret ? 0 : len; ++} ++ ++/* ++ * When DMA memory mode is enabled, there is a limitation for AST2700, ++ * both DMA source and destination address should be 4-byte aligned. ++ * Thus, a 4-byte aligned buffer should be allocated previously and ++ * CPU needs to copy data from it after DMA done. 
++ */ ++static ssize_t aspeed_2700_spi_dirmap_dma_read(struct spi_mem_dirmap_desc *desc, ++ u64 offs, size_t len, void *buf) ++{ ++ int ret = 0; ++ u32 timeout = 0; ++ struct aspeed_spi *aspi = spi_controller_get_devdata(desc->mem->spi->controller); ++ struct device *dev = aspi->dev; ++ struct aspeed_spi_chip *chip = &aspi->chips[spi_get_chipselect(desc->mem->spi, 0)]; ++ struct spi_mem_op op_tmpl = desc->info.op_tmpl; ++ u32 reg_val; ++ u32 extra; ++ u32 tb_read_len = len; ++ u32 read_len; ++ u32 buf_offs = 0; ++ u32 flash_offs = (u32)offs; ++ u32 flash_addr; ++ ++ if (chip->ahb_window_sz < offs + len) { ++ dev_err(dev, "read range exceeds flash remapping size\n"); ++ return 0; ++ } ++ ++ dev_dbg(dev, "read op:0x%x, addr:0x%llx, len:0x%zx\n", ++ op_tmpl.cmd.opcode, offs, len); ++ ++ while (tb_read_len > 0) { ++ /* read max 10KB bytes once */ ++ read_len = MAX_READ_SZ_ONCE - (flash_offs % MAX_READ_SZ_ONCE); ++ if (tb_read_len < read_len) ++ read_len = tb_read_len; ++ ++ /* For AST2600 SPI DMA, flash offset should be 4 byte aligned */ ++ extra = flash_offs % 4; ++ if (extra != 0) { ++ flash_offs = (flash_offs / 4) * 4; ++ read_len += extra; ++ } ++ ++ reg_val = readl(aspi->regs + MISC_CTRL_REG); ++ reg_val |= (SPI_CS_CONTINUOUS | SPI_CS_TO_DIS); ++ writel(reg_val, aspi->regs + MISC_CTRL_REG); ++ ++ writel(chip->ctl_val[ASPEED_SPI_READ], chip->ctl); ++ ++ /* ++ * - Don't use dma_map_single here, since we cannot make sure the buf's ++ * start address is 4-byte-aligned. ++ * - Only support from SPI flash to DRAM. ++ */ ++ flash_addr = (chip->ahb_base_phy + flash_offs) & 0x7fffffff; ++ ++ writel(DMA_DRAM_HI_ADDR, aspi->regs + DMA_HI_ADDR_REG); ++ writel(0x0, aspi->regs + DMA_CTRL_REG); ++ writel((u32)aspi->dma_addr_phy, aspi->regs + DMA_RAM_ADDR_REG); ++ writel(flash_addr, aspi->regs + DMA_FLASH_ADDR_REG); ++ writel(read_len - 1, aspi->regs + DMA_LEN_REG); ++ ++ /* enable DMA irq */ ++ reg_val = readl(aspi->regs + INTR_CTRL_STATUS_REG); ++ reg_val |= SPI_DMA_IRQ_EN; ++ writel(reg_val, aspi->regs + INTR_CTRL_STATUS_REG); ++ ++ reinit_completion(&aspi->dma_done); ++ ++ /* enable read DMA */ ++ writel(0x1, aspi->regs + DMA_CTRL_REG); ++ timeout = wait_for_completion_timeout(&aspi->dma_done, ++ msecs_to_jiffies(2000)); ++ ++ writel(chip->ctl_val[ASPEED_SPI_READ] | CTRL_CE_STOP_ACTIVE, ++ chip->ctl); ++ ++ if (timeout == 0) { ++ writel(0x0, aspi->regs + DMA_CTRL_REG); ++ dev_err(dev, "dma read data timeout %d\n", ret); ++ ret = -1; ++ goto end; ++ } else { ++ memcpy_fromio(buf + buf_offs, aspi->op_buf + extra, read_len - extra); ++ } ++ ++ read_len -= extra; ++ ++ buf_offs += read_len; ++ flash_offs += read_len; ++ tb_read_len -= read_len; ++ } ++ ++end: ++ writel(0x0, aspi->regs + MISC_CTRL_REG); ++ writel(chip->ctl_val[ASPEED_SPI_READ], chip->ctl); ++ ++ return ret ? 
0 : len; ++} ++ ++static ssize_t aspeed_2700_spi_dirmap_dma_write(struct spi_mem_dirmap_desc *desc, ++ u64 offs, size_t len, const void *buf) ++{ ++ int ret = 0; ++ struct aspeed_spi *aspi = spi_controller_get_devdata(desc->mem->spi->controller); ++ struct device *dev = aspi->dev; ++ struct aspeed_spi_chip *chip = &aspi->chips[spi_get_chipselect(desc->mem->spi, 0)]; ++ u32 timeout = 0; ++ u32 reg_val; ++ struct spi_mem_op op_tmpl = desc->info.op_tmpl; ++ u32 flash_addr; ++ ++ if (chip->ahb_window_sz < offs + len) { ++ dev_info(dev, "write range exceeds flash remapping size\n"); ++ return 0; ++ } ++ ++ if (len < 1) ++ return 0; ++ ++ if (len > ASPEED_SPI_OP_BUF_LEN) { ++ dev_info(dev, ++ "written length exceeds expected value (0x%zx)\n", ++ len); ++ return 0; ++ } ++ ++ dev_dbg(dev, "write op:0x%x, addr:0x%08llx, len:0x%08zx\n", ++ op_tmpl.cmd.opcode, offs, len); ++ ++ reg_val = readl(aspi->regs + MISC_CTRL_REG); ++ reg_val |= (SPI_CS_TO_DIS); ++ writel(reg_val, aspi->regs + MISC_CTRL_REG); ++ ++ writel(chip->ctl_val[ASPEED_SPI_WRITE], chip->ctl); ++ ++ /* ++ * - Don't use dma_map_single here, since we cannot make sure the buf's ++ * start address is 4-byte-aligned. ++ * - Only support from SPI flash to DRAM. ++ */ ++ flash_addr = (chip->ahb_base_phy + offs) & 0x7fffffff; ++ memcpy_toio(aspi->op_buf, buf, len); ++ writel(DMA_DRAM_HI_ADDR, aspi->regs + DMA_HI_ADDR_REG); ++ writel(0x0, aspi->regs + DMA_CTRL_REG); ++ writel((u32)aspi->dma_addr_phy, aspi->regs + DMA_RAM_ADDR_REG); ++ writel(flash_addr, aspi->regs + DMA_FLASH_ADDR_REG); ++ writel(len - 1, aspi->regs + DMA_LEN_REG); ++ ++ /* enable DMA irq */ ++ reg_val = readl(aspi->regs + INTR_CTRL_STATUS_REG); ++ reg_val |= SPI_DMA_IRQ_EN; ++ writel(reg_val, aspi->regs + INTR_CTRL_STATUS_REG); ++ ++ reinit_completion(&aspi->dma_done); ++ ++ /* enable write DMA */ ++ writel(0x3, aspi->regs + DMA_CTRL_REG); ++ timeout = wait_for_completion_timeout(&aspi->dma_done, msecs_to_jiffies(2000)); ++ ++ if (timeout == 0) { ++ writel(0x0, aspi->regs + DMA_CTRL_REG); ++ dev_err(dev, "write data timeout %d\n", ret); ++ ret = -1; ++ } ++ ++ writel(chip->ctl_val[ASPEED_SPI_WRITE] | CTRL_CE_STOP_ACTIVE, ++ chip->ctl); ++ ++ writel(chip->ctl_val[ASPEED_SPI_READ], chip->ctl); ++ writel(0x0, aspi->regs + MISC_CTRL_REG); ++ ++ return ret ? 
0 : len; ++} ++ ++static irqreturn_t aspeed_2600_spi_dma_isr(int irq, void *dev_id) ++{ ++ struct aspeed_spi *aspi = (struct aspeed_spi *)dev_id; ++ u32 reg_val; ++ ++ if (!(readl(aspi->regs + INTR_CTRL_STATUS_REG) & SPI_DMA_STATUS)) ++ return IRQ_NONE; ++ ++ reg_val = readl(aspi->regs + INTR_CTRL_STATUS_REG); ++ reg_val &= ~SPI_DMA_IRQ_EN; ++ writel(reg_val, aspi->regs + INTR_CTRL_STATUS_REG); ++ ++ writel(0x0, aspi->regs + DMA_CTRL_REG); ++ writel(DMA_DISCARD_REQ_MAGIC, aspi->regs + DMA_CTRL_REG); ++ ++ complete(&aspi->dma_done); ++ ++ return IRQ_HANDLED; ++} ++ ++static irqreturn_t aspeed_2700_spi_dma_isr(int irq, void *dev_id) ++{ ++ struct aspeed_spi *aspi = (struct aspeed_spi *)dev_id; ++ u32 reg_val; ++ ++ if (!(readl(aspi->regs + INTR_CTRL_STATUS_REG) & SPI_DMA_STATUS) || ++ !(readl(aspi->regs + INTR_CTRL_STATUS_REG) & SPI_DMA_IRQ_STS)) { ++ return IRQ_NONE; ++ } ++ ++ reg_val = readl(aspi->regs + INTR_CTRL_STATUS_REG); ++ reg_val &= ~SPI_DMA_IRQ_EN; ++ writel(reg_val, aspi->regs + INTR_CTRL_STATUS_REG); ++ ++ reg_val = readl(aspi->regs + INTR_CTRL_STATUS_REG); ++ reg_val |= SPI_DMA_IRQ_STS; ++ writel(reg_val, aspi->regs + INTR_CTRL_STATUS_REG); ++ ++ writel(0x0, aspi->regs + DMA_CTRL_REG); ++ ++ complete(&aspi->dma_done); ++ ++ return IRQ_HANDLED; ++} ++ + static int aspeed_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) + { + int ret; +@@ -383,171 +987,310 @@ + spi_get_chipselect(mem->spi, 0)); + } + +-struct aspeed_spi_window { +- u32 cs; +- u32 offset; +- u32 size; +-}; ++static const struct aspeed_spi_data ast2500_fmc_data; ++static const struct aspeed_spi_data ast2500_spi_data; ++static const struct aspeed_spi_data ast2600_spi_data; ++static const struct aspeed_spi_data ast2600_fmc_data; + +-static void aspeed_spi_get_windows(struct aspeed_spi *aspi, +- struct aspeed_spi_window windows[ASPEED_SPI_MAX_NUM_CS]) ++static int aspeed_spi_set_window(struct aspeed_spi *aspi) + { +- const struct aspeed_spi_data *data = aspi->data; +- u32 reg_val; ++ struct device *dev = aspi->dev; ++ off_t offset = 0; ++ phys_addr_t start; ++ phys_addr_t ltpi_start; ++ phys_addr_t end; ++ void __iomem *seg_reg_base = aspi->regs + CE0_SEGMENT_ADDR_REG; ++ void __iomem *seg_reg; ++ u32 seg_val; + u32 cs; ++ size_t win_sz; + + for (cs = 0; cs < aspi->data->max_cs; cs++) { +- reg_val = readl(aspi->regs + CE0_SEGMENT_ADDR_REG + cs * 4); +- windows[cs].cs = cs; +- windows[cs].size = data->segment_end(aspi, reg_val) - +- data->segment_start(aspi, reg_val); +- windows[cs].offset = data->segment_start(aspi, reg_val) - aspi->ahb_base_phy; +- dev_vdbg(aspi->dev, "CE%d offset=0x%.8x size=0x%x\n", cs, +- windows[cs].offset, windows[cs].size); ++ if (aspi->chips[cs].ahb_base) ++ devm_iounmap(dev, aspi->chips[cs].ahb_base); + } ++ ++ for (cs = 0; cs < aspi->data->max_cs; cs++) { ++ seg_reg = seg_reg_base + cs * 4; ++ start = aspi->ahb_base_phy + offset; ++ ltpi_start = aspi->ltpi_base_phy + offset; ++ win_sz = aspi->chips[cs].ahb_window_sz; ++ end = start + win_sz; ++ ++ seg_val = aspi->data->segment_reg(aspi, start, end); ++ if (win_sz == 0) ++ seg_val = 0; ++ ++ writel(seg_val, seg_reg); ++ ++ if (seg_val != readl(seg_reg)) { ++ dev_warn(dev, "CE%d expected window [ 0x%.9llx - 0x%.9llx ] %zdMB", ++ cs, (u64)start, (u64)end - 1, win_sz >> 20); ++ dev_warn(dev, "seg_val = 0x%x, readl(seg_reg) = 0x%x\n", ++ seg_val, readl(seg_reg)); ++ ++ seg_val = readl(seg_reg); ++ dev_warn(dev, "restore to 0x%x\n", seg_val); ++ ++ win_sz = aspi->data->segment_end(aspi, seg_val) - ++ aspi->data->segment_start(aspi, 
seg_val); ++ ++ if (win_sz < 0) ++ return -ERANGE; ++ ++ end = start + win_sz; ++ } ++ ++ if (win_sz != 0) ++ dev_dbg(dev, "CE%d new window [ 0x%.9llx - 0x%.9llx ] %zdMB", ++ cs, (u64)start, (u64)end - 1, win_sz >> 20); ++ else ++ dev_dbg(dev, "CE%d window closed", cs); ++ ++ aspi->chips[cs].ahb_base_phy = start; ++ offset += win_sz; ++ ++ if (offset > aspi->ahb_window_sz) { ++ dev_err(dev, "offset value 0x%llx is too large.\n", (u64)offset); ++ return -ENOSPC; ++ } ++ ++ if (win_sz == 0) ++ continue; ++ ++ if ((aspi->flag & ASPEED_SPI_MIN_WINDOW) != 0) { ++ aspi->chips[cs].ahb_base = devm_ioremap(dev, ++ start, ++ aspi->data->min_window_sz); ++ } else { ++ if ((aspi->flag & ASPEED_SPI_LTPI_SUPPORT) != 0) ++ aspi->chips[cs].ahb_base = devm_ioremap(dev, ++ ltpi_start, ++ win_sz); ++ else ++ aspi->chips[cs].ahb_base = devm_ioremap(dev, ++ start, ++ win_sz); ++ } ++ ++ if (!aspi->chips[cs].ahb_base) { ++ dev_err(dev, "fail to remap window [0x%.9llx - 0x%.9llx]\n", ++ (u64)start, (u64)end - 1); ++ return -ENOMEM; ++ } ++ } ++ ++ return 0; + } + + /* +- * On the AST2600, some CE windows are closed by default at reset but +- * U-Boot should open all. ++ * Usually, the decoding address is not configured at the u-boot stage. ++ * Or, the existing decoding address configuration is wrong. ++ * Thus, force to assign a default decoding address during driver probe. + */ +-static int aspeed_spi_chip_set_default_window(struct aspeed_spi_chip *chip) ++static int aspeed_spi_chip_set_default_window(struct aspeed_spi *aspi) + { +- struct aspeed_spi *aspi = chip->aspi; +- struct aspeed_spi_window windows[ASPEED_SPI_MAX_NUM_CS] = { 0 }; +- struct aspeed_spi_window *win = &windows[chip->cs]; ++ u32 cs; + + /* No segment registers for the AST2400 SPI controller */ + if (aspi->data == &ast2400_spi_data) { +- win->offset = 0; +- win->size = aspi->ahb_window_size; +- } else { +- aspeed_spi_get_windows(aspi, windows); ++ aspi->chips[0].ahb_window_sz = aspi->ahb_window_sz; ++ return aspeed_spi_set_window(aspi); + } + +- chip->ahb_base = aspi->ahb_base + win->offset; +- chip->ahb_window_size = win->size; ++ for (cs = 0; cs < aspi->num_cs; cs++) ++ aspi->chips[cs].ahb_window_sz = aspi->data->min_window_sz; ++ ++ /* Close unused CS */ ++ for (cs = aspi->num_cs; cs < aspi->data->max_cs; cs++) ++ aspi->chips[cs].ahb_window_sz = 0; + +- dev_dbg(aspi->dev, "CE%d default window [ 0x%.8x - 0x%.8x ] %dMB", +- chip->cs, aspi->ahb_base_phy + win->offset, +- aspi->ahb_base_phy + win->offset + win->size - 1, +- win->size >> 20); ++ if (aspi->data->adjust_window) ++ aspi->data->adjust_window(aspi); + +- return chip->ahb_window_size ? 0 : -1; ++ return aspeed_spi_set_window(aspi); + } + +-static int aspeed_spi_set_window(struct aspeed_spi *aspi, +- const struct aspeed_spi_window *win) ++/* ++ * As the flash size grows up, we need to trim some decoding ++ * size if needed for the sake of conforming the maximum ++ * decoding size. We trim the decoding size from the largest ++ * CS in order to avoid affecting the default boot up sequence ++ * from CS0 where command mode or normal mode is used. ++ * Notice, if a CS decoding size is trimmed, command mode may ++ * not work perfectly on that CS. 
++ */
++static int aspeed_spi_trim_window_size(struct aspeed_spi *aspi)
+ {
+-	u32 start = aspi->ahb_base_phy + win->offset;
+-	u32 end = start + win->size;
+-	void __iomem *seg_reg = aspi->regs + CE0_SEGMENT_ADDR_REG + win->cs * 4;
+-	u32 seg_val_backup = readl(seg_reg);
+-	u32 seg_val = aspi->data->segment_reg(aspi, start, end);
++	struct aspeed_spi_chip *chips = aspi->chips;
++	size_t total_sz;
++	int cs = aspi->data->max_cs - 1;
++	u32 i;
++	bool trimmed = false;
++
++	do {
++		total_sz = 0;
++		for (i = 0; i < aspi->data->max_cs; i++)
++			total_sz += chips[i].ahb_window_sz;
+
+-	if (seg_val == seg_val_backup)
+-		return 0;
++		if (cs < 0)
++			return -ENOMEM;
++
++		if (chips[cs].ahb_window_sz <= aspi->data->min_window_sz) {
++			cs--;
++			continue;
++		}
++
++		if (total_sz > aspi->ahb_window_sz) {
++			chips[cs].ahb_window_sz -= aspi->data->min_window_sz;
++			total_sz -= aspi->data->min_window_sz;
++			trimmed = true;
++		}
++	} while (total_sz > aspi->ahb_window_sz);
++
++	if (trimmed) {
++		dev_warn(aspi->dev, "trimmed window size:\n");
++		for (cs = 0; cs < aspi->data->max_cs; cs++) {
++			dev_warn(aspi->dev, "CE%d: 0x%08zx\n",
++				 cs, chips[cs].ahb_window_sz);
++		}
++	}
++
++	return 0;
++}
++
++static int aspeed_adjust_window_ast2400(struct aspeed_spi *aspi)
++{
++	int ret;
++	int cs;
++	struct aspeed_spi_chip *chips = aspi->chips;
++
++	/* Close unused CS. */
++	for (cs = aspi->num_cs; cs < aspi->data->max_cs; cs++)
++		chips[cs].ahb_window_sz = 0;
++
++	ret = aspeed_spi_trim_window_size(aspi);
++	if (ret != 0)
++		return ret;
+
+-	writel(seg_val, seg_reg);
++	return 0;
++}
++
++/*
++ * For AST2500, the minimum address decoding size for each CS
++ * is 8MB instead of zero. This address decoding size is
++ * mandatory for each CS no matter whether it will be used.
++ * This is a HW limitation.
++ */
++static int aspeed_adjust_window_ast2500(struct aspeed_spi *aspi)
++{
++	int ret;
++	int i;
++	int cs;
++	size_t pre_sz;
++	size_t extra_sz;
++	struct aspeed_spi_chip *chips = aspi->chips;
++
++	/* Assign min_window_sz to unused CS. */
++	for (cs = aspi->num_cs; cs < aspi->data->max_cs; cs++) {
++		if (chips[cs].ahb_window_sz < aspi->data->min_window_sz)
++			chips[cs].ahb_window_sz = aspi->data->min_window_sz;
++	}
+
+	/*
+-	 * Restore initial value if something goes wrong else we could
+-	 * loose access to the chip.
++	 * If command mode or normal mode is used, the start address of a
++	 * decoding range should be a multiple of its related flash size.
++	 * Namely, the total decoding size from flash 0 to flash N should
++	 * be a multiple of the size of flash (N + 1).
+	 */
+-	if (seg_val != readl(seg_reg)) {
+-		dev_err(aspi->dev, "CE%d invalid window [ 0x%.8x - 0x%.8x ] %dMB",
+-			win->cs, start, end - 1, win->size >> 20);
+-		writel(seg_val_backup, seg_reg);
+-		return -EIO;
++	for (cs = aspi->num_cs - 1; cs >= 0; cs--) {
++		pre_sz = 0;
++		for (i = 0; i < cs; i++)
++			pre_sz += chips[i].ahb_window_sz;
++
++		if (chips[cs].ahb_window_sz != 0 &&
++		    (pre_sz % chips[cs].ahb_window_sz) != 0) {
++			extra_sz = chips[cs].ahb_window_sz -
++				   (pre_sz % chips[cs].ahb_window_sz);
++			chips[0].ahb_window_sz += extra_sz;
++		}
+	}
+
+-	if (win->size)
+-		dev_dbg(aspi->dev, "CE%d new window [ 0x%.8x - 0x%.8x ] %dMB",
+-			win->cs, start, end - 1, win->size >> 20);
+-	else
+-		dev_dbg(aspi->dev, "CE%d window closed", win->cs);
++	ret = aspeed_spi_trim_window_size(aspi);
++	if (ret != 0)
++		return ret;
++
++	if (aspi->data == &ast2500_spi_data)
++		chips[1].ahb_window_sz = 0x08000000 - chips[0].ahb_window_sz;
++
++	return 0;
++}
++
++static int aspeed_adjust_window_ast2600(struct aspeed_spi *aspi)
++{
++	int ret;
++	int i;
++	int cs;
++	size_t pre_sz;
++	size_t extra_sz;
++	struct aspeed_spi_chip *chips = aspi->chips;
++
++	/* Close unused CS. */
++	for (cs = aspi->num_cs; cs < aspi->data->max_cs; cs++)
++		chips[cs].ahb_window_sz = 0;
++
++	/*
++	 * If command mode or normal mode is used, the start address of a
++	 * decoding range should be a multiple of its related flash size.
++	 * Namely, the total decoding size from flash 0 to flash N should
++	 * be a multiple of the size of flash (N + 1).
++	 */
++	for (cs = aspi->num_cs - 1; cs >= 0; cs--) {
++		pre_sz = 0;
++		for (i = 0; i < cs; i++)
++			pre_sz += chips[i].ahb_window_sz;
++
++		if (chips[cs].ahb_window_sz != 0 &&
++		    (pre_sz % chips[cs].ahb_window_sz) != 0) {
++			extra_sz = chips[cs].ahb_window_sz -
++				   (pre_sz % chips[cs].ahb_window_sz);
++			chips[0].ahb_window_sz += extra_sz;
++		}
++	}
++
++	ret = aspeed_spi_trim_window_size(aspi);
++	if (ret != 0)
++		return ret;
+
+	return 0;
+ }
+
+ /*
+  * Yet to be done when possible :
+- * - Align mappings on flash size (we don't have the info)
+  * - ioremap each window, not strictly necessary since the overall window
+  *   is correct.
+  */
+-static const struct aspeed_spi_data ast2500_spi_data;
+-static const struct aspeed_spi_data ast2600_spi_data;
+-static const struct aspeed_spi_data ast2600_fmc_data;
+
+ static int aspeed_spi_chip_adjust_window(struct aspeed_spi_chip *chip,
+-					 u32 local_offset, u32 size)
++					 size_t size)
+ {
+	struct aspeed_spi *aspi = chip->aspi;
+-	struct aspeed_spi_window windows[ASPEED_SPI_MAX_NUM_CS] = { 0 };
+-	struct aspeed_spi_window *win = &windows[chip->cs];
+	int ret;
+
+	/* No segment registers for the AST2400 SPI controller */
+	if (aspi->data == &ast2400_spi_data)
+		return 0;
+
+-	/*
+-	 * Due to an HW issue on the AST2500 SPI controller, the CE0
+-	 * window size should be smaller than the maximum 128MB.
+-	 */
+-	if (aspi->data == &ast2500_spi_data && chip->cs == 0 && size == SZ_128M) {
+-		size = 120 << 20;
+-		dev_info(aspi->dev, "CE%d window resized to %dMB (AST2500 HW quirk)",
+-			 chip->cs, size >> 20);
+-	}
+-
+-	/*
+-	 * The decoding size of AST2600 SPI controller should set at
+-	 * least 2MB.
+- */ +- if ((aspi->data == &ast2600_spi_data || aspi->data == &ast2600_fmc_data) && +- size < SZ_2M) { +- size = SZ_2M; +- dev_info(aspi->dev, "CE%d window resized to %dMB (AST2600 Decoding)", +- chip->cs, size >> 20); +- } +- +- aspeed_spi_get_windows(aspi, windows); +- + /* Adjust this chip window */ +- win->offset += local_offset; +- win->size = size; ++ aspi->chips[chip->cs].ahb_window_sz = size; + +- if (win->offset + win->size > aspi->ahb_window_size) { +- win->size = aspi->ahb_window_size - win->offset; +- dev_warn(aspi->dev, "CE%d window resized to %dMB", chip->cs, win->size >> 20); +- } ++ if (aspi->data->adjust_window) ++ aspi->data->adjust_window(aspi); + +- ret = aspeed_spi_set_window(aspi, win); ++ ret = aspeed_spi_set_window(aspi); + if (ret) + return ret; + +- /* Update chip mapping info */ +- chip->ahb_base = aspi->ahb_base + win->offset; +- chip->ahb_window_size = win->size; +- +- /* +- * Also adjust next chip window to make sure that it does not +- * overlap with the current window. +- */ +- if (chip->cs < aspi->data->max_cs - 1) { +- struct aspeed_spi_window *next = &windows[chip->cs + 1]; +- +- /* Change offset and size to keep the same end address */ +- if ((next->offset + next->size) > (win->offset + win->size)) +- next->size = (next->offset + next->size) - (win->offset + win->size); +- else +- next->size = 0; +- next->offset = win->offset + win->size; +- +- aspeed_spi_set_window(aspi, next); +- } + return 0; + } + +@@ -559,6 +1302,9 @@ + struct aspeed_spi_chip *chip = &aspi->chips[spi_get_chipselect(desc->mem->spi, 0)]; + struct spi_mem_op *op = &desc->info.op_tmpl; + u32 ctl_val; ++ u32 reg_val; ++ u32 div = 0; ++ int i; + int ret = 0; + + dev_dbg(aspi->dev, +@@ -571,50 +1317,108 @@ + + chip->clk_freq = desc->mem->spi->max_speed_hz; + ++ if (aspi->data->safs_init) ++ aspi->data->safs_init(aspi, op); ++ + /* Only for reads */ +- if (op->data.dir != SPI_MEM_DATA_IN) +- return -EOPNOTSUPP; ++ if (op->data.dir == SPI_MEM_DATA_IN) { ++ ret = aspeed_spi_chip_adjust_window(chip, desc->info.length); ++ if (ret) ++ return ret; + +- aspeed_spi_chip_adjust_window(chip, desc->info.offset, desc->info.length); ++ if (desc->info.length > chip->ahb_window_sz) ++ dev_warn(aspi->dev, "CE%d window (%zdMB) too small for mapping", ++ chip->cs, chip->ahb_window_sz >> 20); ++ ++ /* Define the default IO read settings */ ++ ctl_val = chip->ctl_val[ASPEED_SPI_BASE] & ~CTRL_IO_CMD_MASK; ++ ctl_val |= aspeed_spi_get_io_mode(op, SPI_OP_ALL) | ++ op->cmd.opcode << CTRL_COMMAND_SHIFT | ++ CTRL_IO_MODE_READ; ++ ++ if (op->dummy.nbytes) ++ ctl_val |= CTRL_IO_DUMMY_SET(op->dummy.nbytes); ++ ++ if (op->addr.buswidth == 4) { ++ ctl_val |= BIT(15); ++ reg_val = readl(aspi->regs + MISC_CTRL_REG); ++ reg_val |= DUMMY_OUTPUT_DATA; ++ writel(reg_val, aspi->regs + MISC_CTRL_REG); ++ } + +- if (desc->info.length > chip->ahb_window_size) +- dev_warn(aspi->dev, "CE%d window (%dMB) too small for mapping", +- chip->cs, chip->ahb_window_size >> 20); +- +- /* Define the default IO read settings */ +- ctl_val = readl(chip->ctl) & ~CTRL_IO_CMD_MASK; +- ctl_val |= aspeed_spi_get_io_mode(op) | +- op->cmd.opcode << CTRL_COMMAND_SHIFT | +- CTRL_IO_MODE_READ; +- +- if (op->dummy.nbytes) +- ctl_val |= CTRL_IO_DUMMY_SET(op->dummy.nbytes / op->dummy.buswidth); +- +- /* Tune 4BYTE address mode */ +- if (op->addr.nbytes) { +- u32 addr_mode = readl(aspi->regs + CE_CTRL_REG); ++ /* Tune 4BYTE address mode */ ++ if (op->addr.nbytes) { ++ u32 addr_mode = readl(aspi->regs + CE_CTRL_REG); ++ ++ if (op->addr.nbytes == 4) ++ 
addr_mode |= (0x11 << chip->cs); ++ else ++ addr_mode &= ~(0x11 << chip->cs); ++ writel(addr_mode, aspi->regs + CE_CTRL_REG); + +- if (op->addr.nbytes == 4) +- addr_mode |= (0x11 << chip->cs); +- else +- addr_mode &= ~(0x11 << chip->cs); +- writel(addr_mode, aspi->regs + CE_CTRL_REG); ++ /* AST2400 SPI controller sets 4BYTE address mode in ++ * CE0 Control Register ++ */ ++ if (op->addr.nbytes == 4 && chip->aspi->data == &ast2400_spi_data) ++ ctl_val |= CTRL_IO_ADDRESS_4B; ++ } ++ ++ /* READ mode is the controller default setting */ ++ chip->ctl_val[ASPEED_SPI_READ] = ctl_val; ++ writel(chip->ctl_val[ASPEED_SPI_READ], chip->ctl); ++ ++ /* assign SPI clock frequency division */ ++ if (chip->clk_freq < aspi->clk_freq / 5) { ++ if (aspi->data->get_clk_div) ++ div = aspi->data->get_clk_div(chip, chip->clk_freq); ++ ++ for (i = 0; i < ASPEED_SPI_MAX; i++) ++ chip->ctl_val[i] = (chip->ctl_val[i] & ++ aspi->data->hclk_mask) | ++ div; ++ ++ writel(chip->ctl_val[ASPEED_SPI_READ], chip->ctl); ++ } else { ++ ret = aspeed_spi_do_calibration(chip); ++ } + +- /* AST2400 SPI controller sets 4BYTE address mode in +- * CE0 Control Register ++ dev_info(aspi->dev, "CE%d read buswidth: %d [0x%08x]\n", ++ chip->cs, op->data.buswidth, chip->ctl_val[ASPEED_SPI_READ]); ++ dev_dbg(aspi->dev, "spi clock frequency: %dMHz\n", ++ chip->clk_freq / 1000000); ++ ++ /* ++ * aspeed_spi_dirmap_read is not created in ++ * current spi_controller_mem_ops. + */ +- if (op->addr.nbytes == 4 && chip->aspi->data == &ast2400_spi_data) +- ctl_val |= CTRL_IO_ADDRESS_4B; +- } ++ if (!desc->mem->spi->controller->mem_ops->dirmap_read) ++ return -EOPNOTSUPP; + +- /* READ mode is the controller default setting */ +- chip->ctl_val[ASPEED_SPI_READ] = ctl_val; +- writel(chip->ctl_val[ASPEED_SPI_READ], chip->ctl); ++ } else if (op->data.dir == SPI_MEM_DATA_OUT) { ++ /* record some information for normal mode. */ ++ ctl_val = chip->ctl_val[ASPEED_SPI_BASE] & (~CTRL_IO_CMD_MASK); ++ ctl_val |= aspeed_spi_get_io_mode(op, SPI_OP_ALL) | ++ op->cmd.opcode << 16 | CTRL_IO_MODE_WRITE; ++ ++ if ((aspi->flag & ASPEED_SPI_FIXED_LOW_W_CLK) != 0) { ++ /* adjust spi clk for write */ ++ ctl_val = (ctl_val & (~0x0f000f00)) | 0x03000000; ++ } + +- ret = aspeed_spi_do_calibration(chip); ++ chip->ctl_val[ASPEED_SPI_WRITE] = ctl_val; + +- dev_info(aspi->dev, "CE%d read buswidth:%d [0x%08x]\n", +- chip->cs, op->data.buswidth, chip->ctl_val[ASPEED_SPI_READ]); ++ dev_info(aspi->dev, "CE%d write buswidth: %d [0x%08x]\n", ++ chip->cs, op->data.buswidth, chip->ctl_val[ASPEED_SPI_WRITE]); ++ ++ /* ++ * aspeed_spi_dirmap_write is not created in ++ * current spi_controller_mem_ops. 
++ */ ++ if (!desc->mem->spi->controller->mem_ops->dirmap_write) ++ return -EOPNOTSUPP; ++ } else { ++ return -EOPNOTSUPP; ++ } + + return ret; + } +@@ -626,7 +1430,7 @@ + struct aspeed_spi_chip *chip = &aspi->chips[spi_get_chipselect(desc->mem->spi, 0)]; + + /* Switch to USER command mode if mapping window is too small */ +- if (chip->ahb_window_size < offset + len) { ++ if (chip->ahb_window_sz < offset + len) { + int ret; + + ret = aspeed_spi_read_user(chip, &desc->info.op_tmpl, offset, len, buf); +@@ -647,6 +1451,48 @@ + .dirmap_read = aspeed_spi_dirmap_read, + }; + ++static const struct spi_controller_mem_ops aspeed_spi_mem_ops_pure_user = { ++ .supports_op = aspeed_spi_supports_op, ++ .exec_op = aspeed_spi_exec_op, ++ .get_name = aspeed_spi_get_name, ++ .dirmap_create = aspeed_spi_dirmap_create, ++}; ++ ++static const struct spi_controller_mem_ops aspeed_spi_mem_ops_normal_mode = { ++ .supports_op = aspeed_spi_supports_op, ++ .exec_op = aspeed_spi_exec_op_normal_mode, ++ .get_name = aspeed_spi_get_name, ++ .dirmap_create = aspeed_spi_dirmap_create, ++ .dirmap_read = aspeed_spi_dirmap_read, ++}; ++ ++static const struct spi_controller_mem_ops aspeed_spi_ops_normal_read_dma_write = { ++ .get_name = aspeed_spi_get_name, ++ .exec_op = aspeed_spi_exec_op_normal_mode, ++ .supports_op = aspeed_spi_supports_op, ++ .dirmap_create = aspeed_spi_dirmap_create, ++ .dirmap_read = aspeed_spi_dirmap_read, ++ .dirmap_write = aspeed_2600_spi_dirmap_dma_write, ++}; ++ ++static const struct spi_controller_mem_ops aspeed_2600_spi_ops_dma_mode = { ++ .get_name = aspeed_spi_get_name, ++ .exec_op = aspeed_spi_exec_op, ++ .supports_op = aspeed_spi_supports_op, ++ .dirmap_create = aspeed_spi_dirmap_create, ++ .dirmap_read = aspeed_2600_spi_dirmap_dma_read, ++ .dirmap_write = aspeed_2600_spi_dirmap_dma_write, ++}; ++ ++static const struct spi_controller_mem_ops aspeed_2700_spi_ops_dma_mode = { ++ .get_name = aspeed_spi_get_name, ++ .exec_op = aspeed_spi_exec_op, ++ .supports_op = aspeed_spi_supports_op, ++ .dirmap_create = aspeed_spi_dirmap_create, ++ .dirmap_read = aspeed_2700_spi_dirmap_dma_read, ++ .dirmap_write = aspeed_2700_spi_dirmap_dma_write, ++}; ++ + static void aspeed_spi_chip_set_type(struct aspeed_spi *aspi, unsigned int cs, int type) + { + u32 reg; +@@ -675,6 +1521,8 @@ + const struct aspeed_spi_data *data = aspi->data; + unsigned int cs = spi_get_chipselect(spi, 0); + struct aspeed_spi_chip *chip = &aspi->chips[cs]; ++ u32 clk_div = 0; ++ u32 i; + + chip->aspi = aspi; + chip->cs = cs; +@@ -684,15 +1532,20 @@ + if (data->hastype) + aspeed_spi_chip_set_type(aspi, cs, CONFIG_TYPE_SPI); + +- if (aspeed_spi_chip_set_default_window(chip) < 0) { +- dev_warn(aspi->dev, "CE%d window invalid", cs); +- return -EINVAL; +- } +- + aspeed_spi_chip_enable(aspi, cs, true); + + chip->ctl_val[ASPEED_SPI_BASE] = CTRL_CE_STOP_ACTIVE | CTRL_IO_MODE_USER; + ++ if ((aspi->flag & ASPEED_SPI_TIMING_CLB_DISABLED) != 0) { ++ if (aspi->data->get_clk_div) ++ clk_div = aspi->data->get_clk_div(chip, spi->max_speed_hz); ++ ++ for (i = 0; i < ASPEED_SPI_MAX; i++) { ++ chip->ctl_val[i] = (chip->ctl_val[i] & aspi->data->hclk_mask) | ++ clk_div; ++ } ++ } ++ + dev_dbg(aspi->dev, "CE%d setup done\n", cs); + return 0; + } +@@ -722,7 +1575,8 @@ + struct spi_controller *ctlr; + struct aspeed_spi *aspi; + struct resource *res; +- int ret; ++ struct reset_control *reset; ++ int ret = 0; + + data = of_device_get_match_data(&pdev->dev); + if (!data) +@@ -737,17 +1591,13 @@ + aspi->data = data; + aspi->dev = dev; + +- aspi->regs = 
devm_platform_ioremap_resource(pdev, 0); ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ aspi->regs = devm_ioremap_resource(dev, res); + if (IS_ERR(aspi->regs)) + return PTR_ERR(aspi->regs); + +- aspi->ahb_base = devm_platform_get_and_ioremap_resource(pdev, 1, &res); +- if (IS_ERR(aspi->ahb_base)) { +- dev_err(dev, "missing AHB mapping window\n"); +- return PTR_ERR(aspi->ahb_base); +- } +- +- aspi->ahb_window_size = resource_size(res); ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 1); ++ aspi->ahb_window_sz = resource_size(res); + aspi->ahb_base_phy = res->start; + + aspi->clk = devm_clk_get_enabled(&pdev->dev, NULL); +@@ -762,20 +1612,140 @@ + return -EINVAL; + } + ++ reset = devm_reset_control_get_exclusive(dev, NULL); ++ if (!IS_ERR(reset)) ++ reset_control_deassert(reset); ++ ++ aspi->flag = 0; ++ if (of_property_read_bool(dev->of_node, "fmc-spi-normal-mode")) ++ aspi->flag |= ASPEED_SPI_NORMAL_MODE; ++ else if (of_property_read_bool(dev->of_node, "fmc-spi-dma-write")) ++ aspi->flag |= ASPEED_SPI_DMA_WRITE_MODE; ++ else if (of_property_read_bool(dev->of_node, "fmc-spi-dma-mode")) ++ aspi->flag |= ASPEED_SPI_DMA_MODE; ++ else if (of_property_read_bool(dev->of_node, "pure-spi-mode-only")) ++ aspi->flag |= ASPEED_SPI_PURE_USER_MODE; ++ ++ if (of_property_read_bool(dev->of_node, "spi-quad-address")) ++ aspi->flag |= ASPEED_SPI_QUAD_ADDR_SUPPORT; ++ ++ if (of_property_read_bool(dev->of_node, "timing-calibration-disabled")) ++ aspi->flag |= ASPEED_SPI_TIMING_CLB_DISABLED; ++ ++ /* Should be set on AST2600-A1/A2 for errata 65 */ ++ if (of_property_read_bool(dev->of_node, "low-spi-clk-write")) { ++ dev_info(dev, "adopt low spi clk for write\n"); ++ aspi->flag |= ASPEED_SPI_FIXED_LOW_W_CLK; ++ } ++ ++ if (!of_property_read_u64(dev->of_node, "ltpi-base", &aspi->ltpi_base_phy)) { ++ dev_info(dev, "ltpi support\n"); ++ aspi->flag |= ASPEED_SPI_LTPI_SUPPORT; ++ } ++ ++ if ((aspi->flag & ASPEED_SPI_NORMAL_MODE) != 0 || ++ (aspi->flag & ASPEED_SPI_DMA_WRITE_MODE) != 0 || ++ (aspi->flag & ASPEED_SPI_DMA_MODE) != 0) { ++ if (data->ver == 2700) { ++ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); ++ if (ret) { ++ dev_err(dev, "cannot set 64-bits DMA mask\n"); ++ return ret; ++ } ++ } ++ ++ aspi->op_buf = dma_alloc_coherent(dev, ++ ASPEED_SPI_OP_BUF_LEN, ++ &aspi->dma_addr_phy, ++ GFP_DMA | GFP_KERNEL); ++ if (!aspi->op_buf) { ++ dev_err(dev, "fail to alloc dma buffer\n"); ++ ret = -ENOMEM; ++ goto end; ++ } ++ } ++ ++ if ((aspi->flag & ASPEED_SPI_DMA_WRITE_MODE) != 0 || ++ (aspi->flag & ASPEED_SPI_DMA_MODE) != 0) { ++ aspi->irq = platform_get_irq(pdev, 0); ++ if (aspi->irq < 0) { ++ dev_err(dev, "fail to get irq (%d)\n", aspi->irq); ++ return aspi->irq; ++ } ++ ++ if (data->ver == 2600) { ++ ret = devm_request_irq(dev, aspi->irq, ++ aspeed_2600_spi_dma_isr, ++ IRQF_SHARED, dev_name(dev), ++ aspi); ++ } else if (data->ver == 2700) { ++ ret = devm_request_irq(dev, aspi->irq, ++ aspeed_2700_spi_dma_isr, ++ IRQF_SHARED, dev_name(dev), ++ aspi); ++ } ++ ++ if (ret < 0) { ++ dev_err(dev, "fail to request irq (%d)\n", ret); ++ goto end; ++ } ++ ++ init_completion(&aspi->dma_done); ++ } ++ ++ if ((aspi->flag & ASPEED_SPI_DMA_MODE) != 0 || ++ (aspi->flag & ASPEED_SPI_PURE_USER_MODE) != 0) ++ aspi->flag |= ASPEED_SPI_MIN_WINDOW; ++ + /* IRQ is for DMA, which the driver doesn't support yet */ + + ctlr->mode_bits = SPI_RX_DUAL | SPI_TX_DUAL | data->mode_bits; + ctlr->bus_num = pdev->id; +- ctlr->mem_ops = &aspeed_spi_mem_ops; + ctlr->setup = aspeed_spi_setup; + ctlr->cleanup = 
aspeed_spi_cleanup; +- ctlr->num_chipselect = data->max_cs; ++ ctlr->num_chipselect = of_get_available_child_count(dev->of_node); + ctlr->dev.of_node = dev->of_node; + ++ if ((aspi->flag & ASPEED_SPI_NORMAL_MODE) != 0) { ++ dev_info(&pdev->dev, "normal mode is used\n"); ++ ctlr->mem_ops = &aspeed_spi_mem_ops_normal_mode; ++ } else if ((aspi->flag & ASPEED_SPI_DMA_WRITE_MODE) != 0) { ++ dev_info(&pdev->dev, "normal read and dma write mode are used\n"); ++ ctlr->mem_ops = &aspeed_spi_ops_normal_read_dma_write; ++ } else if ((aspi->flag & ASPEED_SPI_DMA_MODE) != 0) { ++ dev_info(&pdev->dev, "dma mode is used\n"); ++ if (data->ver == 2600) ++ ctlr->mem_ops = &aspeed_2600_spi_ops_dma_mode; ++ else if (data->ver == 2700) ++ ctlr->mem_ops = &aspeed_2700_spi_ops_dma_mode; ++ } else if ((aspi->flag & ASPEED_SPI_PURE_USER_MODE) != 0) { ++ dev_info(&pdev->dev, "user mode is used\n"); ++ ctlr->mem_ops = &aspeed_spi_mem_ops_pure_user; ++ } else { ++ dev_info(&pdev->dev, "user mode and normal read are used\n"); ++ ctlr->mem_ops = &aspeed_spi_mem_ops; ++ } ++ ++ if (ctlr->num_chipselect == 0) { ++ dev_warn(&pdev->dev, "Force num_chipselect to 1\n"); ++ ctlr->num_chipselect = 1; ++ } ++ ++ aspi->num_cs = ctlr->num_chipselect; ++ ++ ret = aspeed_spi_chip_set_default_window(aspi); ++ if (ret) { ++ dev_err(&pdev->dev, "fail to set default window\n"); ++ goto end; ++ } ++ + ret = devm_spi_register_controller(dev, ctlr); +- if (ret) ++ if (ret) { + dev_err(&pdev->dev, "spi_register_controller failed\n"); ++ goto end; ++ } + ++end: + return ret; + } + +@@ -783,6 +1753,13 @@ + { + struct aspeed_spi *aspi = platform_get_drvdata(pdev); + ++ if (aspi->op_buf) { ++ dma_free_coherent(aspi->dev, ++ ASPEED_SPI_OP_BUF_LEN, ++ aspi->op_buf, ++ aspi->dma_addr_phy); ++ } ++ + aspeed_spi_enable(aspi, false); + } + +@@ -795,19 +1772,20 @@ + * The address range is encoded with absolute addresses in the overall + * mapping window. 
+ */ +-static u32 aspeed_spi_segment_start(struct aspeed_spi *aspi, u32 reg) ++static u64 aspeed_spi_segment_start(struct aspeed_spi *aspi, u32 reg) + { + return ((reg >> 16) & 0xFF) << 23; + } + +-static u32 aspeed_spi_segment_end(struct aspeed_spi *aspi, u32 reg) ++static u64 aspeed_spi_segment_end(struct aspeed_spi *aspi, u32 reg) + { + return ((reg >> 24) & 0xFF) << 23; + } + +-static u32 aspeed_spi_segment_reg(struct aspeed_spi *aspi, u32 start, u32 end) ++static u32 aspeed_spi_segment_reg(struct aspeed_spi *aspi, u64 start, u64 end) + { +- return (((start >> 23) & 0xFF) << 16) | (((end >> 23) & 0xFF) << 24); ++ return (u32)((((start >> 23) & 0xFF) << 16) | ++ (((end >> 23) & 0xFF) << 24)); + } + + /* +@@ -817,18 +1795,18 @@ + + #define AST2600_SEG_ADDR_MASK 0x0ff00000 + +-static u32 aspeed_spi_segment_ast2600_start(struct aspeed_spi *aspi, ++static u64 aspeed_spi_segment_ast2600_start(struct aspeed_spi *aspi, + u32 reg) + { +- u32 start_offset = (reg << 16) & AST2600_SEG_ADDR_MASK; ++ u64 start_offset = (reg << 16) & AST2600_SEG_ADDR_MASK; + + return aspi->ahb_base_phy + start_offset; + } + +-static u32 aspeed_spi_segment_ast2600_end(struct aspeed_spi *aspi, ++static u64 aspeed_spi_segment_ast2600_end(struct aspeed_spi *aspi, + u32 reg) + { +- u32 end_offset = reg & AST2600_SEG_ADDR_MASK; ++ u64 end_offset = reg & AST2600_SEG_ADDR_MASK; + + /* segment is disabled */ + if (!end_offset) +@@ -838,28 +1816,193 @@ + } + + static u32 aspeed_spi_segment_ast2600_reg(struct aspeed_spi *aspi, +- u32 start, u32 end) ++ u64 start, u64 end) + { + /* disable zero size segments */ + if (start == end) + return 0; + +- return ((start & AST2600_SEG_ADDR_MASK) >> 16) | +- ((end - 1) & AST2600_SEG_ADDR_MASK); ++ return (u32)((start & AST2600_SEG_ADDR_MASK) >> 16) | ++ ((end - 1) & AST2600_SEG_ADDR_MASK); ++} ++ ++static u64 aspeed_spi_segment_ast2700_start(struct aspeed_spi *aspi, ++ u32 reg) ++{ ++ u64 start_offset = (((reg) & 0x0000ffff) << 16); ++ ++ if (start_offset == 0) ++ return aspi->ahb_base_phy; ++ ++ return aspi->ahb_base_phy + start_offset; ++} ++ ++static u64 aspeed_spi_segment_ast2700_end(struct aspeed_spi *aspi, ++ u32 reg) ++{ ++ u64 end_offset = reg & 0xffff0000; ++ ++ /* Meaningless end_offset, set to physical ahb base. */ ++ if (end_offset == 0) ++ return aspi->ahb_base_phy; ++ ++ return aspi->ahb_base_phy + end_offset; ++} ++ ++static u32 aspeed_spi_segment_ast2700_reg(struct aspeed_spi *aspi, ++ u64 start, u64 end) ++{ ++ if (start == end) ++ return 0; ++ ++ return (u32)((((start) >> 16) & 0x7fff) | ++ ((end + 1) & 0x7fff0000)); ++} ++ ++static const u32 aspeed_spi_hclk_divs[] = { ++ /* HCLK, HCLK/2, HCLK/3, HCLK/4, HCLK/5, ..., HCLK/16 */ ++ 0xf, 0x7, 0xe, 0x6, 0xd, ++ 0x5, 0xc, 0x4, 0xb, 0x3, ++ 0xa, 0x2, 0x9, 0x1, 0x8, ++ 0x0 ++}; ++ ++#define ASPEED_SPI_HCLK_DIV(i) \ ++ (aspeed_spi_hclk_divs[(i)] << CTRL_FREQ_SEL_SHIFT) ++ ++/* Transfer maximum clock frequency to register setting */ ++static u32 apseed_get_clk_div_ast2400(struct aspeed_spi_chip *chip, ++ u32 max_hz) ++{ ++ struct device *dev = chip->aspi->dev; ++ u32 hclk_clk = chip->aspi->clk_freq; ++ u32 hclk_div = 0; ++ u32 i; ++ bool found = false; ++ ++ /* FMC/SPIR10[11:8] */ ++ for (i = 0; i < ARRAY_SIZE(aspeed_spi_hclk_divs); i++) { ++ if (hclk_clk / (i + 1) <= max_hz) { ++ found = true; ++ break; ++ } ++ } ++ ++ if (found) { ++ hclk_div = ASPEED_SPI_HCLK_DIV(i); ++ chip->clk_freq = hclk_clk / (i + 1); ++ } ++ ++ dev_dbg(dev, "found: %s, hclk: %d, max_clk: %d\n", ++ found ? 
"yes" : "no", hclk_clk, max_hz); ++ ++ if (found) { ++ dev_dbg(dev, "h_div: %d (mask 0x%08x), speed: %d\n", ++ i + 1, hclk_div, chip->clk_freq); ++ } ++ ++ return hclk_div; ++} ++ ++static u32 apseed_get_clk_div_ast2500(struct aspeed_spi_chip *chip, ++ u32 max_hz) ++{ ++ struct device *dev = chip->aspi->dev; ++ u32 hclk_clk = chip->aspi->clk_freq; ++ u32 hclk_div = 0; ++ u32 i; ++ bool found = false; ++ ++ /* FMC/SPIR10[11:8] */ ++ for (i = 0; i < ARRAY_SIZE(aspeed_spi_hclk_divs); i++) { ++ if (hclk_clk / (i + 1) <= max_hz) { ++ found = true; ++ chip->clk_freq = hclk_clk / (i + 1); ++ break; ++ } ++ } ++ ++ if (found) { ++ hclk_div = ASPEED_SPI_HCLK_DIV(i); ++ goto end; ++ } ++ ++ for (i = 0; i < ARRAY_SIZE(aspeed_spi_hclk_divs); i++) { ++ if (hclk_clk / ((i + 1) * 4) <= max_hz) { ++ found = true; ++ chip->clk_freq = hclk_clk / ((i + 1) * 4); ++ break; ++ } ++ } ++ ++ if (found) ++ hclk_div = BIT(13) | ASPEED_SPI_HCLK_DIV(i); ++ ++end: ++ dev_dbg(dev, "found: %s, hclk: %d, max_clk: %d\n", ++ found ? "yes" : "no", hclk_clk, max_hz); ++ ++ if (found) { ++ dev_dbg(dev, "h_div: %d (mask %x), speed: %d\n", ++ i + 1, hclk_div, chip->clk_freq); ++ } ++ ++ return hclk_div; ++} ++ ++static u32 apseed_get_clk_div_ast2600(struct aspeed_spi_chip *chip, ++ u32 max_hz) ++{ ++ struct device *dev = chip->aspi->dev; ++ u32 hclk_clk = chip->aspi->clk_freq; ++ u32 hclk_div = 0; ++ u32 i, j; ++ bool found = false; ++ ++ /* FMC/SPIR10[27:24] */ ++ for (j = 0; j < 16; j++) { ++ /* FMC/SPIR10[11:8] */ ++ for (i = 0; i < ARRAY_SIZE(aspeed_spi_hclk_divs); i++) { ++ if (i == 0 && j == 0) ++ continue; ++ ++ if (hclk_clk / (i + 1 + (j * 16)) <= max_hz) { ++ found = true; ++ break; ++ } ++ } ++ ++ if (found) { ++ hclk_div = ((j << 24) | ASPEED_SPI_HCLK_DIV(i)); ++ chip->clk_freq = hclk_clk / (i + 1 + j * 16); ++ break; ++ } ++ } ++ ++ dev_dbg(dev, "found: %s, hclk: %d, max_clk: %d\n", ++ found ? "yes" : "no", hclk_clk, max_hz); ++ ++ if (found) { ++ dev_dbg(dev, "base_clk: %d, h_div: %d (mask %x), speed: %d\n", ++ j, i + 1, hclk_div, chip->clk_freq); ++ } ++ ++ return hclk_div; + } + + /* + * Read timing compensation sequences + */ + +-#define CALIBRATE_BUF_SIZE SZ_16K ++#define CALIBRATE_BUF_SIZE SZ_4K ++#define CALIBRATE_REPEAT_COUNT 1 + + static bool aspeed_spi_check_reads(struct aspeed_spi_chip *chip, + const u8 *golden_buf, u8 *test_buf) + { + int i; + +- for (i = 0; i < 10; i++) { ++ for (i = 0; i < CALIBRATE_REPEAT_COUNT; i++) { + memcpy_fromio(test_buf, chip->ahb_base, CALIBRATE_BUF_SIZE); + if (memcmp(test_buf, golden_buf, CALIBRATE_BUF_SIZE) != 0) { + #if defined(VERBOSE_DEBUG) +@@ -872,7 +2015,10 @@ + return true; + } + +-#define FREAD_TPASS(i) (((i) / 2) | (((i) & 1) ? 0 : 8)) ++static inline u32 FREAD_TPASS(int i) ++{ ++ return (((i) / 2) | (((i) & 1) ? 8 : 0)); ++} + + /* + * The timing register is shared by all devices. Only update for CE0. +@@ -902,7 +2048,7 @@ + pass = aspeed_spi_check_reads(chip, golden_buf, test_buf); + dev_dbg(aspi->dev, + " * [%08x] %d HCLK delay, %dns DI delay : %s", +- fread_timing_val, i / 2, (i & 1) ? 0 : 4, ++ fread_timing_val, i / 2, (i & 1) ? 4 : 0, + pass ? 
"PASS" : "FAIL"); + if (pass) { + pass_count++; +@@ -948,17 +2094,6 @@ + return cnt >= 64; + } + +-static const u32 aspeed_spi_hclk_divs[] = { +- 0xf, /* HCLK */ +- 0x7, /* HCLK/2 */ +- 0xe, /* HCLK/3 */ +- 0x6, /* HCLK/4 */ +- 0xd, /* HCLK/5 */ +-}; +- +-#define ASPEED_SPI_HCLK_DIV(i) \ +- (aspeed_spi_hclk_divs[(i) - 1] << CTRL_FREQ_SEL_SHIFT) +- + static int aspeed_spi_do_calibration(struct aspeed_spi_chip *chip) + { + struct aspeed_spi *aspi = chip->aspi; +@@ -969,10 +2104,17 @@ + u8 *golden_buf = NULL; + u8 *test_buf = NULL; + int i, rc, best_div = -1; ++ u32 clk_div_config = 0; ++ u32 freq = 0; + + dev_dbg(aspi->dev, "calculate timing compensation - AHB freq: %d MHz", + ahb_freq / 1000000); + ++ if ((aspi->flag & ASPEED_SPI_TIMING_CLB_DISABLED) != 0) { ++ dev_info(aspi->dev, "timing calibration is disabled\n"); ++ return 0; ++ } ++ + /* + * use the related low frequency to get check calibration data + * and get golden data. +@@ -988,7 +2130,7 @@ + + memcpy_fromio(golden_buf, chip->ahb_base, CALIBRATE_BUF_SIZE); + if (!aspeed_spi_check_calib_data(golden_buf, CALIBRATE_BUF_SIZE)) { +- dev_info(aspi->dev, "Calibration area too uniform, using low speed"); ++ dev_info(aspi->dev, "Calibration area too uniform\n"); + goto no_calib; + } + +@@ -998,42 +2140,81 @@ + #endif + + /* Now we iterate the HCLK dividers until we find our breaking point */ +- for (i = ARRAY_SIZE(aspeed_spi_hclk_divs); i > data->hdiv_max - 1; i--) { +- u32 tv, freq; ++ for (i = data->hdiv_max; i <= 5; i++) { ++ u32 tv; + + freq = ahb_freq / i; + if (freq > max_freq) + continue; + + /* Set the timing */ +- tv = chip->ctl_val[ASPEED_SPI_READ] | ASPEED_SPI_HCLK_DIV(i); ++ tv = chip->ctl_val[ASPEED_SPI_READ] & data->hclk_mask; ++ tv |= ASPEED_SPI_HCLK_DIV(i - 1); + writel(tv, chip->ctl); + dev_dbg(aspi->dev, "Trying HCLK/%d [%08x] ...", i, tv); + rc = data->calibrate(chip, i, golden_buf, test_buf); +- if (rc == 0) ++ if (rc == 0) { + best_div = i; ++ break; ++ } + } + ++no_calib: ++ + /* Nothing found ? */ + if (best_div < 0) { +- dev_warn(aspi->dev, "No good frequency, using dumb slow"); ++ if (data->get_clk_div) ++ clk_div_config = data->get_clk_div(chip, max_freq); + } else { + dev_dbg(aspi->dev, "Found good read timings at HCLK/%d", best_div); +- +- /* Record the freq */ +- for (i = 0; i < ASPEED_SPI_MAX; i++) +- chip->ctl_val[i] = (chip->ctl_val[i] & data->hclk_mask) | +- ASPEED_SPI_HCLK_DIV(best_div); ++ chip->clk_freq = freq; ++ clk_div_config = ASPEED_SPI_HCLK_DIV(best_div - 1); + } + +-no_calib: ++ /* Record the freq */ ++ for (i = 0; i < ASPEED_SPI_MAX; i++) ++ chip->ctl_val[i] = (chip->ctl_val[i] & data->hclk_mask) | ++ clk_div_config; ++ + writel(chip->ctl_val[ASPEED_SPI_READ], chip->ctl); ++ + kfree(test_buf); ++ + return 0; + } + ++static int get_mid_point_of_longest_one(u8 *buf, u32 len) ++{ ++ int i; ++ int start = 0, mid_point = 0; ++ int max_cnt = 0, cnt = 0; ++ ++ for (i = 0; i < len; i++) { ++ if (buf[i] == 1) { ++ cnt++; ++ } else { ++ cnt = 0; ++ start = i; ++ } ++ ++ if (max_cnt < cnt) { ++ max_cnt = cnt; ++ mid_point = start + (cnt / 2); ++ } ++ } ++ ++ /* ++ * In order to get a stable SPI read timing, ++ * abandon the result if the length of longest ++ * consecutive good points is too short. 
++ */ ++ if (max_cnt < 4) ++ return -1; ++ ++ return mid_point; ++} ++ + #define TIMING_DELAY_DI BIT(3) +-#define TIMING_DELAY_HCYCLE_MAX 5 + #define TIMING_REG_AST2600(chip) \ + ((chip)->aspi->regs + (chip)->aspi->data->timing + \ + (chip)->cs * 4) +@@ -1041,60 +2222,136 @@ + static int aspeed_spi_ast2600_calibrate(struct aspeed_spi_chip *chip, u32 hdiv, + const u8 *golden_buf, u8 *test_buf) + { ++ struct device *dev = chip->aspi->dev; + struct aspeed_spi *aspi = chip->aspi; + int hcycle; + u32 shift = (hdiv - 2) << 3; +- u32 mask = ~(0xfu << shift); ++ u32 mask = ~(0xffu << shift); + u32 fread_timing_val = 0; ++ u8 *calib_res = NULL; ++ int calib_point; ++ u32 final_delay; ++ int delay_ns; ++ bool pass; + +- for (hcycle = 0; hcycle <= TIMING_DELAY_HCYCLE_MAX; hcycle++) { +- int delay_ns; +- bool pass = false; +- +- fread_timing_val &= mask; +- fread_timing_val |= hcycle << shift; +- +- /* no DI input delay first */ +- writel(fread_timing_val, TIMING_REG_AST2600(chip)); +- pass = aspeed_spi_check_reads(chip, golden_buf, test_buf); +- dev_dbg(aspi->dev, +- " * [%08x] %d HCLK delay, DI delay none : %s", +- fread_timing_val, hcycle, pass ? "PASS" : "FAIL"); +- if (pass) +- return 0; ++ calib_res = kzalloc(6 * 17, GFP_KERNEL); ++ if (!calib_res) ++ return -ENOMEM; + +- /* Add DI input delays */ ++ for (hcycle = 0; hcycle <= 5; hcycle++) { + fread_timing_val &= mask; + fread_timing_val |= (TIMING_DELAY_DI | hcycle) << shift; + +- for (delay_ns = 0; delay_ns < 0x10; delay_ns++) { +- fread_timing_val &= ~(0xf << (4 + shift)); ++ for (delay_ns = 0; delay_ns < 16; delay_ns++) { ++ fread_timing_val &= ~(0xfu << (4 + shift)); + fread_timing_val |= delay_ns << (4 + shift); + + writel(fread_timing_val, TIMING_REG_AST2600(chip)); + pass = aspeed_spi_check_reads(chip, golden_buf, test_buf); + dev_dbg(aspi->dev, + " * [%08x] %d HCLK delay, DI delay %d.%dns : %s", +- fread_timing_val, hcycle, (delay_ns + 1) / 2, +- (delay_ns + 1) & 1 ? 5 : 5, pass ? "PASS" : "FAIL"); +- /* +- * TODO: This is optimistic. We should look +- * for a working interval and save the middle +- * value in the read timing register. +- */ +- if (pass) +- return 0; ++ fread_timing_val, hcycle, delay_ns / 2, ++ (delay_ns & 1) ? 5 : 0, pass ? 
"PASS" : "FAIL"); ++ ++ calib_res[hcycle * 17 + delay_ns] = pass; + } + } + +- /* No good setting for this frequency */ +- return -1; ++ calib_point = get_mid_point_of_longest_one(calib_res, 6 * 17); ++ ++ if (calib_point < 0) { ++ dev_info(dev, "[HCLK/%d] cannot get good calibration point.\n", ++ hdiv); ++ kfree(calib_res); ++ ++ return -1; ++ } ++ ++ hcycle = calib_point / 17; ++ delay_ns = calib_point % 17; ++ ++ dev_dbg(dev, "final hcycle: %d, delay_ns: %d\n", hcycle, ++ delay_ns); ++ ++ final_delay = (TIMING_DELAY_DI | hcycle | (delay_ns << 4)) << shift; ++ writel(final_delay, TIMING_REG_AST2600(chip)); ++ ++ kfree(calib_res); ++ ++ return 0; ++} ++ ++static void aspeed_spi_ast2600_fill_safs_cmd(struct aspeed_spi *aspi, ++ struct spi_mem_op *op) ++{ ++ u32 tmp_val; ++ ++ if (op->data.dir == SPI_MEM_DATA_IN) { ++ tmp_val = readl(aspi->regs + HOST_DIRECT_ACCESS_CMD_CTRL4); ++ if (op->addr.nbytes == 4) ++ tmp_val = (tmp_val & 0xffff00ff) | (op->cmd.opcode << 8); ++ else ++ tmp_val = (tmp_val & 0xffffff00) | op->cmd.opcode; ++ ++ tmp_val = (tmp_val & 0x0fffffff) | ++ aspeed_spi_get_io_mode(op, SPI_OP_ALL); ++ ++ writel(tmp_val, aspi->regs + HOST_DIRECT_ACCESS_CMD_CTRL4); ++ ++ } else if (op->data.dir == SPI_MEM_DATA_OUT) { ++ tmp_val = readl(aspi->regs + HOST_DIRECT_ACCESS_CMD_CTRL4); ++ tmp_val = (tmp_val & 0xf0ffffff) | ++ (aspeed_spi_get_io_mode(op, SPI_OP_ALL) >> 4); ++ ++ writel(tmp_val, aspi->regs + HOST_DIRECT_ACCESS_CMD_CTRL4); ++ ++ tmp_val = readl(aspi->regs + HOST_DIRECT_ACCESS_CMD_CTRL2); ++ if (op->addr.nbytes == 4) ++ tmp_val = (tmp_val & 0xffff00ff) | (op->cmd.opcode << 8); ++ else ++ tmp_val = (tmp_val & 0xffffff00) | op->cmd.opcode; ++ ++ writel(tmp_val, aspi->regs + HOST_DIRECT_ACCESS_CMD_CTRL2); ++ } ++} ++ ++static void aspeed_spi_ast2700_safs_init(struct aspeed_spi *aspi, ++ struct spi_mem_op *op) ++{ ++ u32 val; ++ ++ (void)op; ++ ++ val = readl(aspi->regs + MISC_CTRL_REG); ++ val |= SPI_UNALGNED_ACCESS | SPI_USER_CMD_MODE; ++ val &= ~SPI_CS_CONTINUOUS; ++ writel(val, aspi->regs + MISC_CTRL_REG); ++} ++ ++static void aspeed_spi_ast2700_safs_start(struct aspeed_spi *aspi) ++{ ++ u32 val; ++ ++ val = readl(aspi->regs + MISC_CTRL_REG); ++ val |= SPI_UNALGNED_ACCESS | SPI_USER_CMD_MODE; ++ val &= ~SPI_CS_CONTINUOUS; ++ writel(val, aspi->regs + MISC_CTRL_REG); ++} ++ ++static void aspeed_spi_ast2700_safs_stop(struct aspeed_spi *aspi) ++{ ++ u32 val; ++ ++ val = readl(aspi->regs + MISC_CTRL_REG); ++ val &= ~GENMASK(27, 24); ++ writel(val, aspi->regs + MISC_CTRL_REG); + } + + /* + * Platform definitions + */ + static const struct aspeed_spi_data ast2400_fmc_data = { ++ .ver = 2400, + .max_cs = 5, + .hastype = true, + .we0 = 16, +@@ -1102,13 +2359,17 @@ + .timing = CE0_TIMING_COMPENSATION_REG, + .hclk_mask = 0xfffff0ff, + .hdiv_max = 1, ++ .min_window_sz = 0x800000, ++ .get_clk_div = apseed_get_clk_div_ast2400, + .calibrate = aspeed_spi_calibrate, + .segment_start = aspeed_spi_segment_start, + .segment_end = aspeed_spi_segment_end, + .segment_reg = aspeed_spi_segment_reg, ++ .adjust_window = aspeed_adjust_window_ast2400, + }; + + static const struct aspeed_spi_data ast2400_spi_data = { ++ .ver = 2400, + .max_cs = 1, + .hastype = false, + .we0 = 0, +@@ -1116,11 +2377,13 @@ + .timing = 0x14, + .hclk_mask = 0xfffff0ff, + .hdiv_max = 1, ++ .get_clk_div = apseed_get_clk_div_ast2400, + .calibrate = aspeed_spi_calibrate, + /* No segment registers */ + }; + + static const struct aspeed_spi_data ast2500_fmc_data = { ++ .ver = 2500, + .max_cs = 3, + .hastype = true, + .we0 = 16, +@@ 
-1128,13 +2391,17 @@ + .timing = CE0_TIMING_COMPENSATION_REG, + .hclk_mask = 0xffffd0ff, + .hdiv_max = 1, ++ .min_window_sz = 0x800000, ++ .get_clk_div = apseed_get_clk_div_ast2500, + .calibrate = aspeed_spi_calibrate, + .segment_start = aspeed_spi_segment_start, + .segment_end = aspeed_spi_segment_end, + .segment_reg = aspeed_spi_segment_reg, ++ .adjust_window = aspeed_adjust_window_ast2500, + }; + + static const struct aspeed_spi_data ast2500_spi_data = { ++ .ver = 2500, + .max_cs = 2, + .hastype = false, + .we0 = 16, +@@ -1142,13 +2409,17 @@ + .timing = CE0_TIMING_COMPENSATION_REG, + .hclk_mask = 0xffffd0ff, + .hdiv_max = 1, ++ .min_window_sz = 0x800000, ++ .get_clk_div = apseed_get_clk_div_ast2500, + .calibrate = aspeed_spi_calibrate, + .segment_start = aspeed_spi_segment_start, + .segment_end = aspeed_spi_segment_end, + .segment_reg = aspeed_spi_segment_reg, ++ .adjust_window = aspeed_adjust_window_ast2500, + }; + + static const struct aspeed_spi_data ast2600_fmc_data = { ++ .ver = 2600, + .max_cs = 3, + .hastype = false, + .mode_bits = SPI_RX_QUAD | SPI_TX_QUAD, +@@ -1157,13 +2428,17 @@ + .timing = CE0_TIMING_COMPENSATION_REG, + .hclk_mask = 0xf0fff0ff, + .hdiv_max = 2, ++ .min_window_sz = 0x200000, ++ .get_clk_div = apseed_get_clk_div_ast2600, + .calibrate = aspeed_spi_ast2600_calibrate, + .segment_start = aspeed_spi_segment_ast2600_start, + .segment_end = aspeed_spi_segment_ast2600_end, + .segment_reg = aspeed_spi_segment_ast2600_reg, ++ .adjust_window = aspeed_adjust_window_ast2600, + }; + + static const struct aspeed_spi_data ast2600_spi_data = { ++ .ver = 2600, + .max_cs = 2, + .hastype = false, + .mode_bits = SPI_RX_QUAD | SPI_TX_QUAD, +@@ -1172,10 +2447,53 @@ + .timing = CE0_TIMING_COMPENSATION_REG, + .hclk_mask = 0xf0fff0ff, + .hdiv_max = 2, ++ .min_window_sz = 0x200000, ++ .get_clk_div = apseed_get_clk_div_ast2600, + .calibrate = aspeed_spi_ast2600_calibrate, + .segment_start = aspeed_spi_segment_ast2600_start, + .segment_end = aspeed_spi_segment_ast2600_end, + .segment_reg = aspeed_spi_segment_ast2600_reg, ++ .adjust_window = aspeed_adjust_window_ast2600, ++ .safs_init = aspeed_spi_ast2600_fill_safs_cmd, ++}; ++ ++static const struct aspeed_spi_data ast2700_fmc_data = { ++ .ver = 2700, ++ .max_cs = 3, ++ .hastype = false, ++ .mode_bits = SPI_RX_QUAD | SPI_TX_QUAD, ++ .we0 = 16, ++ .ctl0 = CE0_CTRL_REG, ++ .timing = CE0_TIMING_COMPENSATION_REG, ++ .hclk_mask = 0xf0fff0ff, ++ .hdiv_max = 2, ++ .min_window_sz = 0x10000, ++ .get_clk_div = apseed_get_clk_div_ast2600, ++ .calibrate = aspeed_spi_ast2600_calibrate, ++ .segment_start = aspeed_spi_segment_ast2700_start, ++ .segment_end = aspeed_spi_segment_ast2700_end, ++ .segment_reg = aspeed_spi_segment_ast2700_reg, ++}; ++ ++static const struct aspeed_spi_data ast2700_spi_data = { ++ .ver = 2700, ++ .max_cs = 2, ++ .hastype = false, ++ .mode_bits = SPI_RX_QUAD | SPI_TX_QUAD, ++ .we0 = 16, ++ .ctl0 = CE0_CTRL_REG, ++ .timing = CE0_TIMING_COMPENSATION_REG, ++ .hclk_mask = 0xf0fff0ff, ++ .hdiv_max = 2, ++ .min_window_sz = 0x10000, ++ .get_clk_div = apseed_get_clk_div_ast2600, ++ .calibrate = aspeed_spi_ast2600_calibrate, ++ .segment_start = aspeed_spi_segment_ast2700_start, ++ .segment_end = aspeed_spi_segment_ast2700_end, ++ .segment_reg = aspeed_spi_segment_ast2700_reg, ++ .safs_init = aspeed_spi_ast2700_safs_init, ++ .safs_start = aspeed_spi_ast2700_safs_start, ++ .safs_stop = aspeed_spi_ast2700_safs_stop, + }; + + static const struct of_device_id aspeed_spi_matches[] = { +@@ -1185,6 +2503,8 @@ + { .compatible = 
"aspeed,ast2500-spi", .data = &ast2500_spi_data }, + { .compatible = "aspeed,ast2600-fmc", .data = &ast2600_fmc_data }, + { .compatible = "aspeed,ast2600-spi", .data = &ast2600_spi_data }, ++ { .compatible = "aspeed,ast2700-fmc", .data = &ast2700_fmc_data }, ++ { .compatible = "aspeed,ast2700-spi", .data = &ast2700_spi_data }, + { } + }; + MODULE_DEVICE_TABLE(of, aspeed_spi_matches); +diff --git a/drivers/spi/spi-aspeed-txrx.c b/drivers/spi/spi-aspeed-txrx.c +--- a/drivers/spi/spi-aspeed-txrx.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/spi/spi-aspeed-txrx.c 2025-12-23 10:16:21.065033658 +0000 +@@ -0,0 +1,622 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++/* ++ * Copyright (C) ASPEED Technology Inc. ++ * Ryan Chen ++ * Chin-Ting Kuo ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define SPI_CONFIG 0x00 ++#define SPI_CTRL 0x04 ++#define SPI_CE0_CTRL 0x10 ++#define SPI_DECODE_ADDR_REG 0x30 ++#define SPI_MISC_CTRL 0x54 ++ ++#define SPI_FULL_DUPLEX_RX_REG 0x1e4 ++ ++#define SPI_IO_MASK (0xf0000000) ++#define SPI_DUAL_IO_MODE (0x3 << 28) ++#define SPI_QUAD_IO_MODE (0x5 << 28) ++ ++#define SPI_LSB_FIRST_CTRL BIT(5) ++#define SPI_CE_INACTIVE BIT(2) ++#define SPI_CMD_USER_MODE (0x3) ++ ++struct aspeed_spi_host { ++ phys_addr_t ahb_base_phy; ++ size_t ahb_window_sz; ++ void __iomem *ctrl_reg; ++ struct spi_controller *ctrl; ++ struct spi_device *spi_dev; ++ struct device *dev; ++ struct clk *clk; ++ u32 ahb_clk; ++ u32 ctrl_val[5]; ++ void __iomem *chip_ahb_base[5]; ++ u8 cs_change; ++ const struct aspeed_spi_info *info; ++}; ++ ++struct aspeed_spi_info { ++ u32 max_cs; ++ size_t min_window_sz; ++ u32 hclk_mask; ++ u32 (*get_clk_div)(struct aspeed_spi_host *host, u32 hz); ++ void (*set_segment)(struct aspeed_spi_host *host); ++}; ++ ++static inline void spi_aspeed_dump_buf(const u8 *buf, u32 len) ++{ ++ u32 i; ++ ++ if (len > 10) { ++ for (i = 0; i < 10; i++) ++ pr_info("%02x ", buf[i]); ++ } else { ++ for (i = 0; i < len; i++) ++ pr_info("%02x ", buf[i]); ++ } ++ pr_info("\n"); ++} ++ ++#define G5_SEGMENT_ADDR_VALUE(start, end) \ ++ (((((start) >> 23) & 0xFF) << 16) | ((((end) >> 23) & 0xFF) << 24)) ++ ++static void aspeed_spi_set_segment_addr_ast2500(struct aspeed_spi_host *host) ++{ ++ const struct aspeed_spi_info *info = host->info; ++ u32 cs; ++ phys_addr_t start = host->ahb_base_phy; ++ phys_addr_t end; ++ u32 reg_val; ++ ++ for (cs = 0; cs < info->max_cs; cs++) { ++ end = start + info->min_window_sz; ++ if (cs == info->max_cs - 1) ++ end = host->ahb_base_phy + host->ahb_window_sz; ++ ++ reg_val = G5_SEGMENT_ADDR_VALUE(start, end); ++ writel(reg_val, host->ctrl_reg + SPI_CE0_CTRL + cs * 4); ++ /* always mapping min_window_sz due to user mode is used */ ++ host->chip_ahb_base[cs] = devm_ioremap(host->dev, ++ start, ++ info->min_window_sz); ++ start = end; ++ } ++} ++ ++#define G6_SEGMENT_ADDR_VALUE(start, end) \ ++ ((((start) & 0x0ff00000) >> 16) | (((end) - 1) & 0x0ff00000)) ++ ++static void aspeed_spi_set_segment_addr_ast2600(struct aspeed_spi_host *host) ++{ ++ const struct aspeed_spi_info *info = host->info; ++ u32 cs; ++ phys_addr_t start = host->ahb_base_phy; ++ phys_addr_t end; ++ u32 reg_val; ++ ++ for (cs = 0; cs < info->max_cs; cs++) { ++ end = start + info->min_window_sz; ++ reg_val = G6_SEGMENT_ADDR_VALUE(start, end); ++ writel(reg_val, host->ctrl_reg + SPI_DECODE_ADDR_REG + cs * 4); ++ host->chip_ahb_base[cs] = 
devm_ioremap(host->dev, ++ start, ++ info->min_window_sz); ++ start = end; ++ } ++} ++ ++#define G7_SEGMENT_ADDR_VALUE(start, end) \ ++ ((((start) >> 16) & 0x7fff) | (((end) + 1) & 0x7fff0000)) ++ ++static void aspeed_spi_set_segment_addr_ast2700(struct aspeed_spi_host *host) ++{ ++ const struct aspeed_spi_info *info = host->info; ++ u32 cs; ++ phys_addr_t start = host->ahb_base_phy; ++ phys_addr_t end; ++ u32 reg_val; ++ ++ for (cs = 0; cs < info->max_cs; cs++) { ++ end = start + info->min_window_sz; ++ reg_val = G7_SEGMENT_ADDR_VALUE(start - host->ahb_base_phy, ++ end - host->ahb_base_phy); ++ writel(reg_val, host->ctrl_reg + SPI_DECODE_ADDR_REG + cs * 4); ++ host->chip_ahb_base[cs] = devm_ioremap(host->dev, ++ start, ++ info->min_window_sz); ++ start = end; ++ } ++} ++ ++static const u32 aspeed_spi_hclk_divs[] = { ++ /* HCLK, HCLK/2, HCLK/3, HCLK/4, HCLK/5, ..., HCLK/16 */ ++ 0xf, 0x7, 0xe, 0x6, 0xd, ++ 0x5, 0xc, 0x4, 0xb, 0x3, ++ 0xa, 0x2, 0x9, 0x1, 0x8, ++ 0x0 ++}; ++ ++#define ASPEED_SPI_HCLK_DIV(i) (aspeed_spi_hclk_divs[(i)] << 8) ++ ++static u32 apseed_get_clk_div_ast2500(struct aspeed_spi_host *host, ++ u32 max_hz) ++{ ++ struct device *dev = host->dev; ++ u32 hclk_clk = host->ahb_clk; ++ u32 hclk_div = 0; ++ u32 i; ++ bool found = false; ++ ++ /* FMC/SPIR10[11:8] */ ++ for (i = 0; i < ARRAY_SIZE(aspeed_spi_hclk_divs); i++) { ++ if (hclk_clk / (i + 1) <= max_hz) { ++ found = true; ++ break; ++ } ++ } ++ ++ if (found) { ++ hclk_div = ASPEED_SPI_HCLK_DIV(i); ++ goto end; ++ } ++ ++ for (i = 0; i < ARRAY_SIZE(aspeed_spi_hclk_divs); i++) { ++ if (hclk_clk / ((i + 1) * 4) <= max_hz) { ++ found = true; ++ break; ++ } ++ } ++ ++ if (found) ++ hclk_div = BIT(13) | ASPEED_SPI_HCLK_DIV(i); ++ ++end: ++ dev_dbg(dev, "found: %s, hclk: %d, max_clk: %d\n", ++ found ? "yes" : "no", hclk_clk, max_hz); ++ ++ if (found) { ++ dev_dbg(dev, "h_div: %d (mask %x)\n", ++ i + 1, hclk_div); ++ } ++ ++ return hclk_div; ++} ++ ++static u32 apseed_get_clk_div_ast2600(struct aspeed_spi_host *host, ++ u32 max_hz) ++{ ++ struct device *dev = host->dev; ++ u32 hclk_clk = host->ahb_clk; ++ u32 hclk_div = 0; ++ u32 i, j; ++ bool found = false; ++ ++ /* FMC/SPIR10[27:24] */ ++ for (j = 0; j < 16; j++) { ++ /* FMC/SPIR10[11:8] */ ++ for (i = 0; i < ARRAY_SIZE(aspeed_spi_hclk_divs); i++) { ++ if (i == 0 && j == 0) ++ continue; ++ ++ if (hclk_clk / (i + 1 + (j * 16)) <= max_hz) { ++ found = true; ++ break; ++ } ++ } ++ ++ if (found) { ++ hclk_div = ((j << 24) | ASPEED_SPI_HCLK_DIV(i)); ++ break; ++ } ++ } ++ ++ dev_dbg(dev, "found: %s, hclk: %d, max_clk: %d\n", ++ found ? 
"yes" : "no", hclk_clk, max_hz); ++ ++ if (found) { ++ dev_dbg(dev, "base_clk: %d, h_div: %d (mask %x)\n", ++ j, i + 1, hclk_div); ++ } ++ ++ return hclk_div; ++} ++ ++static void aspeed_spi_chip_set_type(struct aspeed_spi_host *host) ++{ ++ u32 reg; ++ u32 cs; ++ ++ for (cs = 0; cs < host->info->max_cs; cs++) { ++ reg = readl(host->ctrl_reg + SPI_CONFIG); ++ reg &= ~(0x3 << (cs * 2)); ++ reg |= 0x2 << (cs * 2); ++ writel(reg, host->ctrl_reg + SPI_CONFIG); ++ } ++} ++ ++static void aspeed_spi_enable(struct aspeed_spi_host *host, bool enable) ++{ ++ u32 cs; ++ u32 reg; ++ u32 we_bit; ++ ++ for (cs = 0; cs < host->info->max_cs; cs++) { ++ reg = readl(host->ctrl_reg + SPI_CONFIG); ++ we_bit = (0x1 << cs) << 16; ++ ++ if (enable) ++ reg |= we_bit; ++ else ++ reg &= ~we_bit; ++ ++ writel(reg, host->ctrl_reg + SPI_CONFIG); ++ } ++} ++ ++static int aspeed_spi_setup(struct spi_device *spi) ++{ ++ struct aspeed_spi_host *host = ++ (struct aspeed_spi_host *)spi_controller_get_devdata(spi->controller); ++ struct device *dev = host->dev; ++ u32 clk_div; ++ u8 cs = spi_get_chipselect(spi, 0); ++ void __iomem *ctrl_reg = host->ctrl_reg + SPI_CE0_CTRL + cs * 4; ++ u32 unsupport_mode = (u32)(~(SPI_MODE_0 | SPI_RX_DUAL | SPI_TX_DUAL | ++ SPI_RX_QUAD | SPI_TX_QUAD | SPI_LSB_FIRST)); ++ ++ dev_dbg(dev, "cs: %d, mode: %d, max_speed: %d, bits_per_word: %d\n", ++ cs, spi->mode, spi->max_speed_hz, spi->bits_per_word); ++ ++ if (spi->mode & unsupport_mode) { ++ dev_dbg(dev, "unsupported mode bits: %x\n", spi->mode); ++ return -EINVAL; ++ } ++ ++ host->ctrl_val[cs] = SPI_CE_INACTIVE | SPI_CMD_USER_MODE; ++ ++ if (spi->max_speed_hz) { ++ clk_div = host->info->get_clk_div(host, spi->max_speed_hz); ++ } else { ++ /* speed zero means "as slow as possible" */ ++ clk_div = ~(host->info->hclk_mask); ++ } ++ ++ host->ctrl_val[cs] |= clk_div; ++ ++ if (spi->mode & SPI_LSB_FIRST) ++ host->ctrl_val[cs] |= SPI_LSB_FIRST_CTRL; ++ ++ writel(host->ctrl_val[cs], ctrl_reg); ++ ++ dev_info(dev, "cs: %d, ctrl_val: 0x%08x\n", cs, host->ctrl_val[cs]); ++ ++ return 0; ++} ++ ++static void aspeed_spi_start_user(struct spi_device *spi) ++{ ++ struct aspeed_spi_host *host = ++ (struct aspeed_spi_host *)spi_controller_get_devdata(spi->controller); ++ u8 cs = spi_get_chipselect(spi, 0); ++ u32 ctrl_val = host->ctrl_val[cs]; ++ void __iomem *ctrl_reg = host->ctrl_reg + SPI_CE0_CTRL + cs * 4; ++ ++ ctrl_val |= SPI_CE_INACTIVE; ++ writel(ctrl_val, ctrl_reg); ++ ++ ctrl_val &= ~SPI_CE_INACTIVE; ++ writel(ctrl_val, ctrl_reg); ++} ++ ++static void aspeed_spi_stop_user(struct spi_device *spi) ++{ ++ struct aspeed_spi_host *host = ++ (struct aspeed_spi_host *)spi_controller_get_devdata(spi->controller); ++ u8 cs = spi_get_chipselect(spi, 0); ++ u32 ctrl_val = host->ctrl_val[cs] | SPI_CE_INACTIVE; ++ void __iomem *ctrl_reg = host->ctrl_reg + SPI_CE0_CTRL + cs * 4; ++ ++ writel(ctrl_val, ctrl_reg); ++} ++ ++static void aspeed_spi_transfer_tx(struct aspeed_spi_host *host, const u8 *tx_buf, ++ u8 *rx_buf, void *dst, u32 len) ++{ ++ u32 i; ++ ++ for (i = 0; i < len; i++) { ++ writeb(tx_buf[i], dst); ++ ++ if (rx_buf && tx_buf == rx_buf) ++ rx_buf[i] = readb(host->ctrl_reg + SPI_FULL_DUPLEX_RX_REG); ++ } ++} ++ ++static int aspeed_spi_transfer(struct spi_controller *ctlr, ++ struct spi_message *msg) ++{ ++ struct aspeed_spi_host *host = ++ (struct aspeed_spi_host *)spi_controller_get_devdata(ctlr); ++ struct device *dev = host->dev; ++ struct spi_device *spi = msg->spi; ++ struct spi_transfer *xfer; ++ const u8 *tx_buf; ++ u8 *rx_buf; ++ u32 cs; ++ u32 j 
= 0; ++ u32 ctrl_val, normal_mode; ++ void __iomem *ctrl_reg; ++ ++ if (host->cs_change == 0) ++ aspeed_spi_start_user(spi); ++ ++ cs = spi_get_chipselect(spi, 0); ++ ctrl_reg = host->ctrl_reg + SPI_CE0_CTRL + cs * 4; ++ ctrl_val = readl(ctrl_reg); ++ ++ normal_mode = readl(host->ctrl_reg + SPI_MISC_CTRL); ++ writel(0x0, host->ctrl_reg + SPI_MISC_CTRL); ++ ++ dev_dbg(dev, "cs: %d\n", cs); ++ ++ list_for_each_entry(xfer, &msg->transfers, transfer_list) { ++ dev_dbg(dev, ++ "xfer[%d]: width %d, len %u, tx %p, rx %p\n", ++ j, ++ xfer->bits_per_word, xfer->len, ++ xfer->tx_buf, xfer->rx_buf); ++ ++ tx_buf = xfer->tx_buf; ++ rx_buf = xfer->rx_buf; ++ ++ if (tx_buf) { ++ ctrl_val &= ~SPI_IO_MASK; ++ if (spi->mode & SPI_TX_DUAL) ++ ctrl_val |= SPI_DUAL_IO_MODE; ++ else if (spi->mode & SPI_TX_QUAD) ++ ctrl_val |= SPI_QUAD_IO_MODE; ++ writel(ctrl_val, ctrl_reg); ++ ++#if defined(SPI_ASPEED_TXRX_DBG) ++ pr_info("tx : "); ++ spi_aspeed_dump_buf(tx_buf, xfer->len); ++#endif ++ ++ aspeed_spi_transfer_tx(host, tx_buf, rx_buf, ++ (void *)host->chip_ahb_base[cs], ++ xfer->len); ++ } ++ ++ if (rx_buf && rx_buf != tx_buf) { ++ ctrl_val &= ~SPI_IO_MASK; ++ if (spi->mode & SPI_RX_DUAL) ++ ctrl_val |= SPI_DUAL_IO_MODE; ++ else if (spi->mode & SPI_RX_QUAD) ++ ctrl_val |= SPI_QUAD_IO_MODE; ++ writel(ctrl_val, ctrl_reg); ++ ++ ioread8_rep(host->chip_ahb_base[cs], rx_buf, xfer->len); ++ ++#if defined(SPI_ASPEED_TXRX_DBG) ++ pr_info("rx : "); ++ spi_aspeed_dump_buf(rx_buf, xfer->len); ++#endif ++ } ++ ++ msg->actual_length += xfer->len; ++ host->cs_change = xfer->cs_change; ++ j++; ++ } ++ ++ if (host->cs_change == 0) ++ aspeed_spi_stop_user(spi); ++ ++ msg->status = 0; ++ ++ writel(normal_mode, host->ctrl_reg + SPI_MISC_CTRL); ++ ++ spi_finalize_current_message(ctlr); ++ ++ return 0; ++} ++ ++static int aspeed_spi_probe(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ struct resource *res; ++ struct aspeed_spi_host *host; ++ struct spi_controller *ctrl; ++ struct reset_control *rst; ++ int err = 0; ++ ++ ctrl = devm_spi_alloc_master(dev, sizeof(struct aspeed_spi_host)); ++ if (!ctrl) { ++ dev_err(dev, "No memory for spi controller\n"); ++ return -ENOMEM; ++ } ++ ++ ctrl->mode_bits = SPI_MODE_0 | SPI_RX_DUAL | SPI_TX_DUAL | ++ SPI_RX_QUAD | SPI_TX_QUAD | SPI_LSB_FIRST; ++ ctrl->bits_per_word_mask = SPI_BPW_MASK(8); ++ ctrl->dev.of_node = pdev->dev.of_node; ++ ctrl->bus_num = pdev->id; ++ ++ host = spi_controller_get_devdata(ctrl); ++ platform_set_drvdata(pdev, host); ++ ++ memset(host, 0, sizeof(struct aspeed_spi_host)); ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (!res) { ++ dev_err(dev, "cannot get IORESOURCE_MEM 0\n"); ++ return -ENXIO; ++ } ++ ++ host->ctrl_reg = devm_ioremap_resource(dev, res); ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 1); ++ if (!res) { ++ dev_err(dev, "cannot get IORESOURCE_MEM 1\n"); ++ return -ENXIO; ++ } ++ ++ host->ahb_base_phy = res->start; ++ host->ahb_window_sz = resource_size(res); ++ ++ host->ctrl = spi_controller_get(ctrl); ++ host->dev = &pdev->dev; ++ host->info = of_device_get_match_data(&pdev->dev); ++ if (!host->info) ++ return -ENODEV; ++ ++ rst = devm_reset_control_get_optional(&pdev->dev, NULL); ++ if (rst) { ++ err = reset_control_deassert(rst); ++ if (err) { ++ dev_err(dev, "fail to deassert reset control\n"); ++ return -EBUSY; ++ } ++ } ++ ++ host->clk = devm_clk_get(dev, NULL); ++ if (IS_ERR(host->clk)) { ++ dev_err(dev, "missing clock\n"); ++ return PTR_ERR(host->clk); ++ } ++ ++ host->ahb_clk = 
clk_get_rate(host->clk); ++ if (!host->ahb_clk) { ++ dev_err(dev, "invalid clock\n"); ++ return -EINVAL; ++ } ++ ++ err = clk_prepare_enable(host->clk); ++ if (err) { ++ dev_err(dev, "can not enable the clock\n"); ++ return err; ++ } ++ ++ host->ctrl->setup = aspeed_spi_setup; ++ host->ctrl->transfer_one_message = aspeed_spi_transfer; ++ host->ctrl->num_chipselect = host->info->max_cs; ++ ++ /* configure minimum segment window */ ++ host->info->set_segment(host); ++ aspeed_spi_enable(host, true); ++ aspeed_spi_chip_set_type(host); ++ ++ err = devm_spi_register_controller(dev, host->ctrl); ++ if (err) { ++ dev_err(dev, "failed to register SPI controller\n"); ++ goto disable_clk; ++ } ++ ++ return 0; ++ ++disable_clk: ++ clk_disable_unprepare(host->clk); ++ ++ return err; ++} ++ ++static void aspeed_spi_remove(struct platform_device *pdev) ++{ ++ struct aspeed_spi_host *host = platform_get_drvdata(pdev); ++ ++ aspeed_spi_enable(host, false); ++ clk_disable_unprepare(host->clk); ++} ++ ++struct aspeed_spi_info ast2500_fmc_info = { ++ .max_cs = 3, ++ .min_window_sz = 0x800000, ++ .hclk_mask = 0xffffd0ff, ++ .get_clk_div = apseed_get_clk_div_ast2500, ++ .set_segment = aspeed_spi_set_segment_addr_ast2500, ++}; ++ ++struct aspeed_spi_info ast2500_spi_info = { ++ .max_cs = 2, ++ .min_window_sz = 0x800000, ++ .hclk_mask = 0xffffd0ff, ++ .get_clk_div = apseed_get_clk_div_ast2500, ++ .set_segment = aspeed_spi_set_segment_addr_ast2500, ++}; ++ ++struct aspeed_spi_info ast2600_fmc_info = { ++ .max_cs = 3, ++ .min_window_sz = 0x200000, ++ .hclk_mask = 0xf0fff0ff, ++ .get_clk_div = apseed_get_clk_div_ast2600, ++ .set_segment = aspeed_spi_set_segment_addr_ast2600, ++}; ++ ++struct aspeed_spi_info ast2600_spi_info = { ++ .max_cs = 2, ++ .min_window_sz = 0x200000, ++ .hclk_mask = 0xf0fff0ff, ++ .get_clk_div = apseed_get_clk_div_ast2600, ++ .set_segment = aspeed_spi_set_segment_addr_ast2600, ++}; ++ ++struct aspeed_spi_info ast2700_fmc_info = { ++ .max_cs = 3, ++ .min_window_sz = 0x10000, ++ .hclk_mask = 0xf0fff0ff, ++ .get_clk_div = apseed_get_clk_div_ast2600, ++ .set_segment = aspeed_spi_set_segment_addr_ast2700, ++}; ++ ++struct aspeed_spi_info ast2700_spi_info = { ++ .max_cs = 2, ++ .min_window_sz = 0x10000, ++ .hclk_mask = 0xf0fff0ff, ++ .get_clk_div = apseed_get_clk_div_ast2600, ++ .set_segment = aspeed_spi_set_segment_addr_ast2700, ++}; ++ ++static const struct of_device_id aspeed_spi_of_match[] = { ++ { .compatible = "aspeed,ast2500-fmc-txrx", .data = &ast2500_fmc_info}, ++ { .compatible = "aspeed,ast2500-spi-txrx", .data = &ast2500_spi_info}, ++ { .compatible = "aspeed,ast2600-fmc-txrx", .data = &ast2600_fmc_info}, ++ { .compatible = "aspeed,ast2600-spi-txrx", .data = &ast2600_spi_info}, ++ { .compatible = "aspeed,ast2700-fmc-txrx", .data = &ast2700_fmc_info}, ++ { .compatible = "aspeed,ast2700-spi-txrx", .data = &ast2700_spi_info}, ++ { }, ++}; ++ ++static struct platform_driver aspeed_spi_driver = { ++ .probe = aspeed_spi_probe, ++ .remove = aspeed_spi_remove, ++ .driver = { ++ .name = KBUILD_MODNAME, ++ .of_match_table = aspeed_spi_of_match, ++ }, ++}; ++ ++module_platform_driver(aspeed_spi_driver); ++ ++MODULE_DESCRIPTION("ASPEED Pure SPI Driver"); ++MODULE_AUTHOR("Ryan Chen"); ++MODULE_AUTHOR("Chin-Ting Kuo"); ++MODULE_LICENSE("GPL"); ++ +diff --git a/drivers/tty/serial/8250/8250_aspeed.c b/drivers/tty/serial/8250/8250_aspeed.c +--- a/drivers/tty/serial/8250/8250_aspeed.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/tty/serial/8250/8250_aspeed.c 2025-12-23 10:16:20.962035385 +0000 +@@ 
-0,0 +1,524 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright (C) ASPEED Technology Inc. ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "8250.h" ++ ++#define DEVICE_NAME "aspeed-uart" ++#define UNKNOWN 0 ++#define AST2500_PLAT 1 ++#define AST2600_PLAT 2 ++#define AST2700_PLAT 3 ++ ++/* offsets for the aspeed virtual uart registers */ ++#define VUART_GCRA 0x20 ++#define VUART_GCRA_VUART_EN BIT(0) ++#define VUART_GCRA_SIRQ_POLARITY BIT(1) ++#define VUART_GCRA_CHARACTER_TIMEOUT_TIME_MASK GENMASK(3, 2) ++#define VUART_GCRA_DISABLE_HOST_TX_DISCARD BIT(5) ++#define VUART_GCRB 0x24 ++#define VUART_GCRB_HOST_SIRQ_MASK GENMASK(7, 4) ++#define VUART_GCRB_HOST_SIRQ_SHIFT 4 ++#define VUART_ADDRL 0x28 ++#define VUART_ADDRH 0x2c ++#define VUART_GCRG 0x38 ++#define VUART_GCRG_CHARACTER_TIMEOUT_TIME_CONTROL BIT(1) ++ ++#define DMA_TX_BUFSZ PAGE_SIZE ++#define DMA_RX_BUFSZ (64 * 1024) ++ ++struct uart_ops ast8250_pops; ++ ++struct ast8250_vuart { ++ u32 port; ++ u32 sirq; ++ u32 sirq_pol; ++ bool character_timeout_time_en; ++}; ++ ++struct ast8250_udma { ++ u32 ch; ++ ++ u32 tx_fifosz; ++ u32 rx_fifosz; ++ ++ dma_addr_t tx_addr; ++ dma_addr_t rx_addr; ++ ++ struct kfifo *tx_fifo; ++ struct kfifo *rx_fifo; ++ ++ bool tx_tmout_dis; ++ bool rx_tmout_dis; ++}; ++ ++struct ast8250_data { ++ int line; ++ ++ u8 __iomem *regs; ++ ++ bool is_vuart; ++ bool use_dma; ++ ++ struct reset_control *rst; ++ struct clk *clk; ++ ++ struct ast8250_vuart vuart; ++ struct ast8250_udma dma; ++}; ++ ++static void ast8250_dma_tx_complete(int tx_fifo_rptr, void *id) ++{ ++ unsigned long flags; ++ struct uart_port *port = (struct uart_port*)id; ++ struct ast8250_data *data = port->private_data; ++ struct kfifo *tx_fifo = data->dma.tx_fifo; ++ unsigned int len; ++ ++ spin_lock_irqsave(&port->lock, flags); ++ ++ len = kfifo_out(tx_fifo, NULL, tx_fifo_rptr); ++ port->icount.tx += len; ++ ++ if (kfifo_len(tx_fifo) < WAKEUP_CHARS) ++ uart_write_wakeup(port); ++ ++ spin_unlock_irqrestore(&port->lock, flags); ++} ++ ++static void ast8250_dma_rx_complete(int rx_fifo_wptr, void *id) ++{ ++ unsigned long flags; ++ struct uart_port *up = (struct uart_port*)id; ++ struct tty_port *tp = &up->state->port; ++ struct ast8250_data *data = up->private_data; ++ struct ast8250_udma *dma = &data->dma; ++ struct kfifo *rx_fifo = dma->rx_fifo; ++ u32 len = 0; ++ u8 buf[128]; ++ ++ spin_lock_irqsave(&up->lock, flags); ++ ++ dma_sync_single_for_cpu(up->dev, ++ dma->rx_addr, dma->rx_fifosz, DMA_FROM_DEVICE); ++ ++ while (!kfifo_is_empty(rx_fifo)) { ++ len = kfifo_out(rx_fifo, buf, sizeof(buf)); ++ tty_insert_flip_string(tp, buf, len); ++ up->icount.rx += len; ++ } ++ ++ tty_flip_buffer_push(tp); ++ ++ spin_unlock_irqrestore(&up->lock, flags); ++} ++ ++static void ast8250_dma_start_tx(struct uart_port *port) ++{ ++ struct ast8250_data *data = port->private_data; ++ struct ast8250_udma *dma = &data->dma; ++ struct kfifo *tx_fifo = dma->tx_fifo; ++ ++ dma_sync_single_for_device(port->dev, ++ dma->tx_addr, dma->tx_fifosz, DMA_TO_DEVICE); ++ ++ aspeed_udma_set_tx_wptr(dma->ch, kfifo_len(tx_fifo)); ++} ++ ++static void ast8250_dma_pops_hook(struct uart_port *port) ++{ ++ static int first = 1; ++ ++ if (first) { ++ ast8250_pops = *port->ops; ++ ast8250_pops.start_tx = ast8250_dma_start_tx; ++ } ++ ++ first = 0; ++ port->ops = &ast8250_pops; ++} ++ ++static void ast8250_vuart_init(struct ast8250_data *data) ++{ ++ u8 
reg; ++ struct ast8250_vuart *vuart = &data->vuart; ++ ++ /* IO port address */ ++ writeb((u8)(vuart->port >> 0), data->regs + VUART_ADDRL); ++ writeb((u8)(vuart->port >> 8), data->regs + VUART_ADDRH); ++ ++ /* SIRQ number */ ++ reg = readb(data->regs + VUART_GCRB); ++ reg &= ~VUART_GCRB_HOST_SIRQ_MASK; ++ reg |= ((vuart->sirq << VUART_GCRB_HOST_SIRQ_SHIFT) & VUART_GCRB_HOST_SIRQ_MASK); ++ writeb(reg, data->regs + VUART_GCRB); ++ ++ /* SIRQ polarity */ ++ reg = readb(data->regs + VUART_GCRA); ++ if (vuart->sirq_pol) ++ reg |= VUART_GCRA_SIRQ_POLARITY; ++ else ++ reg &= ~VUART_GCRA_SIRQ_POLARITY; ++ writeb(reg, data->regs + VUART_GCRA); ++ ++ if (vuart->character_timeout_time_en) { ++ /* Character timeout time */ ++ reg = readb(data->regs + VUART_GCRA); ++ reg |= VUART_GCRA_CHARACTER_TIMEOUT_TIME_MASK; ++ writeb(reg, data->regs + VUART_GCRA); ++ ++ /* Character timeout time by LCLK control bit */ ++ reg = readb(data->regs + VUART_GCRG); ++ reg |= VUART_GCRG_CHARACTER_TIMEOUT_TIME_CONTROL; ++ writeb(reg, data->regs + VUART_GCRG); ++ } ++} ++ ++static void ast8250_vuart_set_host_tx_discard(struct ast8250_data *data, bool discard) ++{ ++ u8 reg; ++ ++ reg = readb(data->regs + VUART_GCRA); ++ if (discard) ++ reg &= ~VUART_GCRA_DISABLE_HOST_TX_DISCARD; ++ else ++ reg |= VUART_GCRA_DISABLE_HOST_TX_DISCARD; ++ writeb(reg, data->regs + VUART_GCRA); ++} ++ ++static void ast8250_vuart_set_enable(struct ast8250_data *data, bool enable) ++{ ++ u8 reg; ++ ++ reg = readb(data->regs + VUART_GCRA); ++ if (enable) ++ reg |= VUART_GCRA_VUART_EN; ++ else ++ reg &= ~VUART_GCRA_VUART_EN; ++ writeb(reg, data->regs + VUART_GCRA); ++} ++ ++static int ast8250_handle_irq(struct uart_port *port) ++{ ++ u32 iir = port->serial_in(port, UART_IIR); ++ return serial8250_handle_irq(port, iir); ++} ++ ++static int ast8250_startup(struct uart_port *port) ++{ ++ int rc = 0; ++ struct ast8250_data *data = port->private_data; ++ struct ast8250_udma *dma; ++ ++ if (data->is_vuart) ++ ast8250_vuart_set_host_tx_discard(data, false); ++ ++ if (data->use_dma) { ++ dma = &data->dma; ++ ++ dma->tx_fifosz = DMA_TX_BUFSZ; ++ dma->rx_fifosz = DMA_RX_BUFSZ; ++ ++ if (kfifo_alloc(dma->tx_fifo, dma->tx_fifosz, GFP_KERNEL)) { ++ dev_err(port->dev, "failed to allocate TX DMA ring buffer\n"); ++ rc = -ENOMEM; ++ goto out; ++ } ++ ++ if (kfifo_alloc(dma->rx_fifo, dma->rx_fifosz, GFP_KERNEL)) { ++ dev_err(port->dev, "failed to allocate RX DMA ring buffer\n"); ++ rc = -ENOMEM; ++ goto free_tx_fifo; ++ } ++ ++ dma->tx_addr = dma_map_single(port->dev, dma->tx_fifo->kfifo.data, ++ dma->tx_fifosz, DMA_TO_DEVICE); ++ if (dma_mapping_error(port->dev, dma->tx_addr)) { ++ dev_err(port->dev, "failed to map streaming TX DMA region\n"); ++ rc = -ENOMEM; ++ goto free_rx_fifo; ++ } ++ ++ dma->rx_addr = dma_map_single(port->dev, dma->rx_fifo->kfifo.data, ++ dma->rx_fifosz, DMA_FROM_DEVICE); ++ if (dma_mapping_error(port->dev, dma->rx_addr)) { ++ dev_err(port->dev, "failed to map streaming RX DMA region\n"); ++ rc = -ENOMEM; ++ goto free_rx_fifo; ++ } ++ ++ rc = aspeed_udma_request_tx_chan(dma->ch, dma->tx_addr, ++ dma->tx_fifo, dma->tx_fifosz, ast8250_dma_tx_complete, port, dma->tx_tmout_dis); ++ if (rc) { ++ dev_err(port->dev, "failed to request DMA TX channel\n"); ++ goto free_rx_fifo; ++ } ++ ++ rc = aspeed_udma_request_rx_chan(dma->ch, dma->rx_addr, ++ dma->rx_fifo, dma->rx_fifosz, ast8250_dma_rx_complete, port, dma->rx_tmout_dis); ++ if (rc) { ++ dev_err(port->dev, "failed to request DMA RX channel\n"); ++ goto free_rx_fifo; ++ } ++ ++ 
ast8250_dma_pops_hook(port); ++ ++ aspeed_udma_tx_chan_ctrl(dma->ch, ASPEED_UDMA_OP_ENABLE); ++ aspeed_udma_rx_chan_ctrl(dma->ch, ASPEED_UDMA_OP_ENABLE); ++ } ++ ++ memset(&port->icount, 0, sizeof(port->icount)); ++ return serial8250_do_startup(port); ++ ++free_rx_fifo: ++ kfifo_free(dma->rx_fifo); ++ ++free_tx_fifo: ++ kfifo_free(dma->tx_fifo); ++ ++out: ++ return rc; ++} ++ ++static void ast8250_shutdown(struct uart_port *port) ++{ ++ int rc; ++ struct ast8250_data *data = port->private_data; ++ struct ast8250_udma *dma; ++ ++ if (data->use_dma) { ++ dma = &data->dma; ++ ++ aspeed_udma_tx_chan_ctrl(dma->ch, ASPEED_UDMA_OP_RESET); ++ aspeed_udma_rx_chan_ctrl(dma->ch, ASPEED_UDMA_OP_RESET); ++ ++ aspeed_udma_tx_chan_ctrl(dma->ch, ASPEED_UDMA_OP_DISABLE); ++ aspeed_udma_rx_chan_ctrl(dma->ch, ASPEED_UDMA_OP_DISABLE); ++ ++ rc = aspeed_udma_free_tx_chan(dma->ch); ++ if (rc) ++ dev_err(port->dev, "failed to free DMA TX channel, rc=%d\n", rc); ++ ++ rc = aspeed_udma_free_rx_chan(dma->ch); ++ if (rc) ++ dev_err(port->dev, "failed to free DMA RX channel, rc=%d\n", rc); ++ ++ dma_unmap_single(port->dev, dma->tx_addr, ++ dma->tx_fifosz, DMA_TO_DEVICE); ++ dma_unmap_single(port->dev, dma->rx_addr, ++ dma->rx_fifosz, DMA_FROM_DEVICE); ++ ++ kfree(dma->tx_fifo); ++ kfree(dma->rx_fifo); ++ } ++ ++ if (data->is_vuart) ++ ast8250_vuart_set_host_tx_discard(data, true); ++ ++ serial8250_do_shutdown(port); ++} ++ ++static int __maybe_unused ast8250_suspend(struct device *dev) ++{ ++ struct ast8250_data *data = dev_get_drvdata(dev); ++ serial8250_suspend_port(data->line); ++ return 0; ++} ++ ++static int __maybe_unused ast8250_resume(struct device *dev) ++{ ++ struct ast8250_data *data = dev_get_drvdata(dev); ++ serial8250_resume_port(data->line); ++ return 0; ++} ++ ++static int ast8250_probe(struct platform_device *pdev) ++{ ++ int rc; ++ struct uart_8250_port uart = {}; ++ struct uart_port *port = &uart.port; ++ struct device *dev = &pdev->dev; ++ struct ast8250_data *data; ++ uint32_t plat = (unsigned long)of_device_get_match_data(dev); ++ ++ struct resource *res; ++ int irq; ++ ++ rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); ++ if (rc) { ++ dev_err(dev, "cannot set 64-bit DMA mask\n"); ++ return rc; ++ } ++ ++ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); ++ if (data == NULL) ++ return -ENOMEM; ++ ++ data->dma.rx_fifo = devm_kzalloc(dev, sizeof(*data->dma.rx_fifo), GFP_KERNEL); ++ if (!data->dma.rx_fifo) ++ return -ENOMEM; ++ ++ irq = platform_get_irq(pdev, 0); ++ if (irq < 0) { ++ if (irq != -EPROBE_DEFER) ++ dev_err(dev, "failed to get IRQ number\n"); ++ return irq; ++ } ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (res == NULL) { ++ dev_err(dev, "failed to get register base\n"); ++ return -ENODEV; ++ } ++ ++ data->regs = devm_ioremap(dev, res->start, resource_size(res)); ++ if (!data->regs) { ++ dev_err(dev, "failed to map registers\n"); ++ return -ENOMEM; ++ } ++ ++ data->clk = devm_clk_get(dev, NULL); ++ if (IS_ERR(data->clk)) { ++ dev_err(dev, "failed to get clocks\n"); ++ return -ENODEV; ++ } ++ ++ rc = clk_prepare_enable(data->clk); ++ if (rc) { ++ dev_err(dev, "failed to enable clock\n"); ++ return rc; ++ } ++ ++ data->rst = devm_reset_control_get_optional_exclusive(dev, NULL); ++ if (!IS_ERR(data->rst)) ++ reset_control_deassert(data->rst); ++ ++ data->is_vuart = of_property_read_bool(dev->of_node, "virtual"); ++ if (data->is_vuart) { ++ rc = of_property_read_u32(dev->of_node, "port", &data->vuart.port); ++ if (rc) { ++ dev_err(dev,
"failed to get VUART port address\n"); ++ return -ENODEV; ++ } ++ ++ rc = of_property_read_u32(dev->of_node, "sirq", &data->vuart.sirq); ++ if (rc) { ++ dev_err(dev, "failed to get VUART SIRQ number\n"); ++ return -ENODEV; ++ } ++ ++ rc = of_property_read_u32(dev->of_node, "sirq-polarity", &data->vuart.sirq_pol); ++ if (rc) { ++ dev_err(dev, "failed to get VUART SIRQ polarity\n"); ++ return -ENODEV; ++ } ++ ++ if (plat == AST2700_PLAT) ++ data->vuart.character_timeout_time_en = true; ++ else ++ data->vuart.character_timeout_time_en = false; ++ ++ ast8250_vuart_init(data); ++ ast8250_vuart_set_host_tx_discard(data, true); ++ ast8250_vuart_set_enable(data, true); ++ } ++ ++ data->use_dma = of_property_read_bool(dev->of_node, "dma-mode"); ++ if (data->use_dma) { ++ dev_warn(dev, "DMA mode not ready\n"); ++ data->use_dma = false; ++ /* ++ rc = of_property_read_u32(dev->of_node, "dma-channel", &data->dma.ch); ++ if (rc) { ++ dev_err(dev, "failed to get DMA channel\n"); ++ return -ENODEV; ++ } ++ ++ data->dma.tx_tmout_dis = of_property_read_bool(dev->of_node, "dma-tx-timeout-disable"); ++ data->dma.rx_tmout_dis = of_property_read_bool(dev->of_node, "dma-rx-timeout-disable"); ++ */ ++ } ++ ++ spin_lock_init(&port->lock); ++ port->dev = dev; ++ port->type = PORT_16550A; ++ port->irq = irq; ++ port->line = of_alias_get_id(dev->of_node, "serial"); ++ port->handle_irq = ast8250_handle_irq; ++ port->mapbase = res->start; ++ port->mapsize = resource_size(res); ++ port->membase = data->regs; ++ port->uartclk = clk_get_rate(data->clk); ++ port->regshift = 2; ++ port->iotype = UPIO_MEM32; ++ port->flags = UPF_FIXED_TYPE | UPF_FIXED_PORT | UPF_SHARE_IRQ; ++ port->startup = ast8250_startup; ++ port->shutdown = ast8250_shutdown; ++ port->private_data = data; ++ uart.bugs |= UART_BUG_TXRACE; ++ ++ data->line = serial8250_register_8250_port(&uart); ++ if (data->line < 0) { ++ dev_err(dev, "failed to register 8250 port\n"); ++ return data->line; ++ } ++ ++ pm_runtime_set_active(&pdev->dev); ++ pm_runtime_enable(&pdev->dev); ++ ++ platform_set_drvdata(pdev, data); ++ return 0; ++} ++ ++static void ast8250_remove(struct platform_device *pdev) ++{ ++ struct ast8250_data *data = platform_get_drvdata(pdev); ++ ++ if (data->is_vuart) ++ ast8250_vuart_set_enable(data, false); ++ ++ serial8250_unregister_port(data->line); ++} ++ ++static const struct dev_pm_ops ast8250_pm_ops = { ++ SET_SYSTEM_SLEEP_PM_OPS(ast8250_suspend, ast8250_resume) ++}; ++ ++static const struct of_device_id ast8250_of_match[] = { ++ { .compatible = "aspeed,ast2500-uart", .data = (void *)AST2500_PLAT}, ++ { .compatible = "aspeed,ast2600-uart", .data = (void *)AST2600_PLAT}, ++ { .compatible = "aspeed,ast2700-uart", .data = (void *)AST2700_PLAT}, ++ { }, ++}; ++ ++static struct platform_driver ast8250_platform_driver = { ++ .driver = { ++ .name = DEVICE_NAME, ++ .pm = &ast8250_pm_ops, ++ .of_match_table = ast8250_of_match, ++ }, ++ .probe = ast8250_probe, ++ .remove = ast8250_remove, ++}; ++ ++module_platform_driver(ast8250_platform_driver); ++ ++MODULE_AUTHOR("Chia-Wei Wang "); ++MODULE_LICENSE("GPL"); ++MODULE_DESCRIPTION("Aspeed UART Driver"); +diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig +--- a/drivers/tty/serial/8250/Kconfig 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/tty/serial/8250/Kconfig 2025-12-23 10:16:08.799239284 +0000 +@@ -256,6 +256,15 @@ + To compile this driver as a module, choose M here: the module + will be called 8250_accent. 
+ ++config SERIAL_8250_ASPEED ++ tristate "Aspeed UART" ++ depends on SERIAL_8250 && ARCH_ASPEED ++ select ASPEED_UDMA ++ help ++ If you have a system using an Aspeed AST26xx SoC and wish to ++ make use of its 16550A-compatible UART devices with DMA support, ++ say Y to this option. If unsure, say N. ++ + config SERIAL_8250_ASPEED_VUART + tristate "Aspeed Virtual UART" + depends on SERIAL_8250 +diff --git a/drivers/tty/serial/8250/Makefile b/drivers/tty/serial/8250/Makefile +--- a/drivers/tty/serial/8250/Makefile 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/tty/serial/8250/Makefile 2025-12-23 10:16:12.776172591 +0000 +@@ -21,6 +21,7 @@ + obj-$(CONFIG_SERIAL_8250_ACCENT) += 8250_accent.o + obj-$(CONFIG_SERIAL_8250_ACORN) += 8250_acorn.o + obj-$(CONFIG_SERIAL_8250_ASPEED_VUART) += 8250_aspeed_vuart.o ++obj-$(CONFIG_SERIAL_8250_ASPEED) += 8250_aspeed.o + obj-$(CONFIG_SERIAL_8250_BCM2835AUX) += 8250_bcm2835aux.o + obj-$(CONFIG_SERIAL_8250_BCM7271) += 8250_bcm7271.o + obj-$(CONFIG_SERIAL_8250_BOCA) += 8250_boca.o +diff --git a/drivers/ufs/host/Kconfig b/drivers/ufs/host/Kconfig +--- a/drivers/ufs/host/Kconfig 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/ufs/host/Kconfig 2025-12-23 10:16:09.263231503 +0000 +@@ -142,3 +142,12 @@ + + Select this if you have UFS controller on Unisoc chipset. + If unsure, say N. ++ ++config SCSI_UFS_ASPEED ++ tristate "Aspeed specific hooks to UFS controller platform driver" ++ depends on SCSI_UFSHCD_PLATFORM ++ help ++ This selects the Aspeed-specific additions to the UFSHCD platform driver. ++ ++ Select this if you have an Aspeed platform with a UFS controller. ++ If unsure, say N. +diff --git a/drivers/ufs/host/Makefile b/drivers/ufs/host/Makefile +--- a/drivers/ufs/host/Makefile 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/ufs/host/Makefile 2025-12-23 10:16:13.396162194 +0000 +@@ -12,3 +12,4 @@ + obj-$(CONFIG_SCSI_UFS_RENESAS) += ufs-renesas.o + obj-$(CONFIG_SCSI_UFS_SPRD) += ufs-sprd.o + obj-$(CONFIG_SCSI_UFS_TI_J721E) += ti-j721e-ufs.o ++obj-$(CONFIG_SCSI_UFS_ASPEED) += ufs-aspeed.o +diff --git a/drivers/ufs/host/ufs-aspeed.c b/drivers/ufs/host/ufs-aspeed.c +--- a/drivers/ufs/host/ufs-aspeed.c 1970-01-01 00:00:00.000000000 +0000 ++++ b/drivers/ufs/host/ufs-aspeed.c 2025-12-23 10:16:21.026034312 +0000 +@@ -0,0 +1,425 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++/* Copyright (C) 2019 ASPEED Technology Inc. */ ++/* Copyright (C) 2019 IBM Corp.
*/ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "ufshcd-pltfrm.h" ++ ++#define UFS_MPHY_RST_REG 0x0 ++#define UFS_MPHY_RST_N BIT(0) ++#define UFS_MPHY_RST_N_PCS BIT(4) ++ ++#define UFS_MPHY_VCONTROL 0x98 ++#define UFS_MPHY_CALI_IN_1 0x90 ++#define UFS_MPHY_CALI_IN_0 0x8c ++ ++#define ASPEED_UFS_REG_HCLKDIV 0xFC ++ ++struct aspeed_ufscnr { ++ struct clk *clk; ++ struct resource *res; ++ struct reset_control *rst; ++ struct device *dev; ++ ++ void __iomem *regs; ++}; ++ ++struct aspeed_ufshc { ++ u32 temp; ++}; ++ ++static int aspeed_ufscnr_probe(struct platform_device *pdev) ++{ ++ struct device_node *parent, *child; ++ struct aspeed_ufscnr *cnr; ++ u32 reg; ++ int ret; ++ ++ cnr = devm_kzalloc(&pdev->dev, sizeof(struct aspeed_ufscnr), GFP_KERNEL); ++ if (!cnr) ++ return -ENOMEM; ++ ++ cnr->dev = &pdev->dev; ++ ++ cnr->rst = devm_reset_control_get(cnr->dev, NULL); ++ if (IS_ERR(cnr->rst)) { ++ dev_err(&pdev->dev, "Unable to get reset\n"); ++ return PTR_ERR(cnr->rst); ++ } ++ ++ ret = reset_control_assert(cnr->rst); ++ mdelay(1); ++ ret = reset_control_deassert(cnr->rst); ++ mdelay(1); ++ ++ cnr->clk = devm_clk_get(&pdev->dev, NULL); ++ if (IS_ERR(cnr->clk)) { ++ dev_err(&pdev->dev, "Unable to get clock\n"); ++ return PTR_ERR(cnr->clk); ++ } ++ ++ ret = clk_prepare_enable(cnr->clk); ++ if (ret) { ++ dev_err(&pdev->dev, "Unable to enable UFS CLK\n"); ++ return ret; ++ } ++ ++ cnr->res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ cnr->regs = devm_ioremap_resource(&pdev->dev, cnr->res); ++ if (IS_ERR(cnr->regs)) { ++ ret = PTR_ERR(cnr->regs); ++ goto err_clk; ++ } ++ ++ /* given rext, rx_r100, tx calibration value */ ++ writel(0x000d0707, cnr->regs + UFS_MPHY_CALI_IN_1); ++ writel(0xbbffff00, cnr->regs + UFS_MPHY_CALI_IN_0); ++ ++ /* mphy reset deassert */ ++ reg = readl(cnr->regs + UFS_MPHY_RST_REG); ++ reg &= ~(UFS_MPHY_RST_N | UFS_MPHY_RST_N_PCS); ++ ++ writel(0, cnr->regs + UFS_MPHY_RST_REG); ++ mdelay(1); ++ writel(reg | UFS_MPHY_RST_N | UFS_MPHY_RST_N_PCS, cnr->regs + UFS_MPHY_RST_REG); ++ mdelay(1); ++ ++ dev_set_drvdata(&pdev->dev, cnr); ++ ++ parent = pdev->dev.of_node; ++ for_each_available_child_of_node(parent, child) { ++ struct platform_device *cpdev; ++ ++ cpdev = of_platform_device_create(child, NULL, &pdev->dev); ++ if (!cpdev) { ++ of_node_put(child); ++ ret = -ENODEV; ++ goto err_clk; ++ } ++ } ++ ++ return 0; ++ ++err_clk: ++ clk_disable_unprepare(cnr->clk); ++ return ret; ++} ++ ++/** ++ * aspeed_ufscnr_remove - removes the ufscnr driver ++ * @pdev: pointer to platform device handle ++ * ++ * Always returns 0 ++ */ ++static void aspeed_ufscnr_remove(struct platform_device *pdev) ++{ ++ struct aspeed_ufscnr *cnr = dev_get_drvdata(&pdev->dev); ++ ++ clk_disable_unprepare(cnr->clk); ++} ++ ++/** ++ * aspeed_ufshc_init - performs additional ufs initialization ++ * @hba: host controller instance ++ * ++ * Returns status of initialization ++ */ ++static int aspeed_ufshc_init(struct ufs_hba *hba) ++{ ++ int status = 0; ++ struct aspeed_ufshc *ufshc; ++ struct device *dev = hba->dev; ++ ++ ufshc = devm_kzalloc(dev, sizeof(*ufshc), GFP_KERNEL); ++ ++ if (!ufshc) ++ return -ENOMEM; ++ ++ ufshcd_set_variant(hba, ufshc); ++ ++ status = ufshcd_vops_phy_initialization(hba); ++ ++ hba->quirks |= UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE | UFSHCD_QUIRK_HIBERN_FASTAUTO; ++ ++ return status; ++} ++ ++static int aspeed_ufshc_set_hclkdiv(struct ufs_hba *hba) ++{ ++ struct 
ufs_clk_info *clki; ++ struct list_head *head = &hba->clk_list_head; ++ unsigned long core_clk_rate = 0; ++ u32 core_clk_div = 0; ++ ++ if (list_empty(head)) ++ return 0; ++ ++ list_for_each_entry(clki, head, list) { ++ if (IS_ERR_OR_NULL(clki->clk)) ++ continue; ++ if (!strcmp(clki->name, "core_clk")) ++ core_clk_rate = clk_get_rate(clki->clk); ++ } ++ ++ if (!core_clk_rate) { ++ dev_err(hba->dev, "%s: unable to find core_clk rate\n", ++ __func__); ++ return -EINVAL; ++ } ++ ++ core_clk_div = core_clk_rate / USEC_PER_SEC; ++ ++ ufshcd_writel(hba, core_clk_div, ASPEED_UFS_REG_HCLKDIV); ++ /** ++ * Make sure the register was updated, ++ * UniPro layer will not work with an incorrect value. ++ */ ++ mb(); ++ ++ return 0; ++} ++ ++static int aspeed_ufshc_hce_enable_notify(struct ufs_hba *hba, ++ enum ufs_notify_change_status status) ++{ ++ if (status != PRE_CHANGE) ++ return 0; ++ ++ return aspeed_ufshc_set_hclkdiv(hba); ++} ++ ++/** ++ * aspeed_ufshc_link_startup_notify() ++ * Called before and after Link startup is carried out. ++ * @hba: host controller instance ++ * @status: notify stage (pre, post change) ++ * ++ * Return zero for success and non-zero for failure ++ */ ++static int aspeed_ufshc_link_startup_notify(struct ufs_hba *hba, ++ enum ufs_notify_change_status status) ++{ ++ struct aspeed_ufscnr *cnr; ++ ++ if (status != PRE_CHANGE) { ++ cnr = dev_get_drvdata(hba->dev->parent); ++ writel(0x170707, cnr->regs + UFS_MPHY_CALI_IN_1); ++ return 0; ++ } ++ ++ /* ++ * Some UFS devices have issues if LCC is enabled. ++ * So we are setting PA_Local_TX_LCC_Enable to 0 ++ * before link startup which will make sure that both host ++ * and device TX LCC are disabled once link startup is ++ * completed. ++ */ ++ ufshcd_disable_host_tx_lcc(hba); ++ ++ /* ++ * Disabling Autohibern8 feature. 
++ */ ++ hba->ahit = 0; ++ ++ return 0; ++} ++ ++static int aspeed_ufshc_pre_pwr_change(struct ufs_hba *hba, ++ struct ufs_pa_layer_attr *dev_max_params, ++ struct ufs_pa_layer_attr *dev_req_params) ++{ ++ struct ufs_host_params host_cap; ++ int ret; ++ ++ ufshcd_init_host_params(&host_cap); ++ host_cap.hs_rx_gear = UFS_HS_G3; ++ host_cap.hs_tx_gear = UFS_HS_G3; ++ host_cap.rx_pwr_hs = FASTAUTO_MODE; ++ host_cap.tx_pwr_hs = FASTAUTO_MODE; ++ ++ ret = ufshcd_negotiate_pwr_params(&host_cap, ++ dev_max_params, ++ dev_req_params); ++ if (ret) { ++ pr_info("%s: failed to determine capabilities\n", ++ __func__); ++ } ++ ++ /* change 2 lane before high speed due to old unipro v1.6 */ ++ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), false); ++ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), UFS_PWM_G1); ++ ++ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), false); ++ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), UFS_PWM_G1); ++ ++ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), ++ dev_req_params->lane_tx); ++ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), ++ dev_req_params->lane_rx); ++ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), ++ dev_req_params->hs_rate); ++ ++ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE), ++ PA_NO_ADAPT); ++ ++ ret = ufshcd_uic_change_pwr_mode(hba, ++ SLOWAUTO_MODE << 4 | SLOWAUTO_MODE); ++ ++ if (ret) { ++ dev_err(hba->dev, "%s: PWMG1B SLOWAUTO failed ret=%d\n", ++ __func__, ret); ++ } ++ ++ return ret; ++} ++ ++static int aspeed_ufshc_pwr_change_notify(struct ufs_hba *hba, ++ enum ufs_notify_change_status stage, ++ struct ufs_pa_layer_attr *dev_max_params, ++ struct ufs_pa_layer_attr *dev_req_params) ++{ ++ int ret = 0; ++ ++ switch (stage) { ++ case PRE_CHANGE: ++ ret = aspeed_ufshc_pre_pwr_change(hba, dev_max_params, ++ dev_req_params); ++ break; ++ case POST_CHANGE: ++ break; ++ default: ++ ret = -EINVAL; ++ break; ++ } ++ ++ return ret; ++} ++ ++static const struct ufs_hba_variant_ops aspeed_ufshc_hba_vops = { ++ .name = "aspeed-ufshc", ++ .init = aspeed_ufshc_init, ++ .hce_enable_notify = aspeed_ufshc_hce_enable_notify, ++ .link_startup_notify = aspeed_ufshc_link_startup_notify, ++ .pwr_change_notify = aspeed_ufshc_pwr_change_notify, ++}; ++ ++static const struct of_device_id aspeed_ufshc_of_match[] = { ++ { ++ .compatible = "aspeed,ast2700-ufshc", ++ .data = &aspeed_ufshc_hba_vops, ++ }, ++ { }, ++}; ++ ++MODULE_DEVICE_TABLE(of, aspeed_ufshc_of_match); ++ ++/** ++ * aspeed_ufshc_probe - probe routine of the driver ++ * @pdev: pointer to platform device handle ++ * ++ * Return zero for success and non-zero for failure ++ */ ++static int aspeed_ufshc_probe(struct platform_device *pdev) ++{ ++ int err; ++ const struct of_device_id *of_id; ++ struct ufs_hba_variant_ops *vops; ++ struct device *dev = &pdev->dev; ++ ++ of_id = of_match_node(aspeed_ufshc_of_match, dev->of_node); ++ vops = (struct ufs_hba_variant_ops *)of_id->data; ++ ++ /* Perform generic probe */ ++ err = ufshcd_pltfrm_init(pdev, vops); ++ if (err) ++ dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err); ++ ++ return err; ++} ++ ++/** ++ * aspeed_ufshc_remove - removes the ufs driver ++ * @pdev: pointer to platform device handle ++ * ++ * Always returns 0 ++ */ ++static void aspeed_ufshc_remove(struct platform_device *pdev) ++{ ++ struct ufs_hba *hba = platform_get_drvdata(pdev); ++ ++ ufshcd_remove(hba); ++} ++ ++static const struct dev_pm_ops aspeed_ufshc_dev_pm_ops = { ++ SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume) ++ 
SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL) ++ .prepare = ufshcd_suspend_prepare, ++ .complete = ufshcd_resume_complete, ++}; ++ ++static struct platform_driver aspeed_ufshc_driver = { ++ .probe = aspeed_ufshc_probe, ++ .remove = aspeed_ufshc_remove, ++ .driver = { ++ .name = "aspeed-ufshcd", ++ .pm = &aspeed_ufshc_dev_pm_ops, ++ .of_match_table = aspeed_ufshc_of_match, ++ }, ++}; ++ ++static const struct of_device_id aspeed_ufscnr_of_match[] = { ++ { .compatible = "aspeed,ast2700-ufscnr", }, ++ { } ++}; ++ ++MODULE_DEVICE_TABLE(of, aspeed_ufscnr_of_match); ++ ++static struct platform_driver aspeed_ufscnr_driver = { ++ .driver = { ++ .name = "aspeed-ufscnr", ++ .probe_type = PROBE_PREFER_ASYNCHRONOUS, ++ .of_match_table = aspeed_ufscnr_of_match, ++ }, ++ .probe = aspeed_ufscnr_probe, ++ .remove = aspeed_ufscnr_remove, ++}; ++ ++static int __init aspeed_ufs_init(void) ++{ ++ int rc; ++ ++ rc = platform_driver_register(&aspeed_ufscnr_driver); ++ if (rc < 0) ++ return rc; ++ ++ rc = platform_driver_register(&aspeed_ufshc_driver); ++ if (rc < 0) ++ platform_driver_unregister(&aspeed_ufscnr_driver); ++ ++ return rc; ++} ++module_init(aspeed_ufs_init); ++ ++static void __exit aspeed_ufs_exit(void) ++{ ++ platform_driver_unregister(&aspeed_ufscnr_driver); ++ platform_driver_unregister(&aspeed_ufshc_driver); ++} ++module_exit(aspeed_ufs_exit); ++ ++MODULE_AUTHOR("Cool Lee "); ++MODULE_DESCRIPTION("Aspeed UFS host controller platform driver"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/usb/gadget/udc/aspeed-vhub/Kconfig b/drivers/usb/gadget/udc/aspeed-vhub/Kconfig +--- a/drivers/usb/gadget/udc/aspeed-vhub/Kconfig 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/usb/gadget/udc/aspeed-vhub/Kconfig 2025-12-23 10:16:21.160032066 +0000 +@@ -4,5 +4,6 @@ + depends on ARCH_ASPEED || COMPILE_TEST + depends on USB_LIBCOMPOSITE + help +- USB peripheral controller for the Aspeed AST2400, AST2500 and +- AST2600 family SoCs supporting the "vHub" functionality and USB2.0 ++ USB peripheral controller for the Aspeed AST2400, AST2500, AST2600, ++ and AST2700 family SoCs supporting the "vHub" functionality and ++ USB2.0 +diff --git a/drivers/usb/gadget/udc/aspeed-vhub/core.c b/drivers/usb/gadget/udc/aspeed-vhub/core.c +--- a/drivers/usb/gadget/udc/aspeed-vhub/core.c 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/usb/gadget/udc/aspeed-vhub/core.c 2025-12-23 10:16:21.160032066 +0000 +@@ -23,9 +23,27 @@ + #include + #include + #include ++#include ++#include + + #include "vhub.h" + ++#define ASPEED_G7_SCU_VHUB_USB_FUNC_OFFSET 0x410 ++ ++enum ast_g7_pcie { ++ NOT_SUPPORTED, ++ PCIE_EHCI, ++ PCIE_XHCI, ++}; ++ ++struct ast_vhub_match_data { ++ enum ast_g7_pcie g7_pcie; ++ u32 usb_mode_mask; ++ u32 xhci_mode_mask; ++ u32 txfifo_fix_reg; ++ u32 txfifo_fix_val; ++}; ++ + void ast_vhub_done(struct ast_vhub_ep *ep, struct ast_vhub_req *req, + int status) + { +@@ -239,6 +257,7 @@ + if (vhub->force_usb1) + ctrl |= VHUB_CTRL_FULL_SPEED_ONLY; + ++ ctrl |= VHUB_CTRL_AUTO_REMOTE_WAKEUP; + ctrl |= VHUB_CTRL_UPSTREAM_CONNECT; + writel(ctrl, vhub->regs + AST_VHUB_CTRL); + +@@ -253,6 +272,52 @@ + vhub->regs + AST_VHUB_IER); + } + ++static int ast_vhub_init_pcie(struct ast_vhub *vhub, const struct ast_vhub_match_data *pdata) ++{ ++ struct device *dev = &vhub->pdev->dev; ++ struct regmap *pcie_device; ++ struct regmap *scu; ++ u32 scu_usb; ++ int rc = 0; ++ ++ scu = syscon_regmap_lookup_by_phandle(dev->of_node, "aspeed,scu"); ++ if (IS_ERR(scu)) { ++ dev_err(dev, "failed to find SCU regmap\n"); ++ 
return PTR_ERR(scu); ++ } ++ ++ regmap_read(scu, ASPEED_G7_SCU_VHUB_USB_FUNC_OFFSET, &scu_usb); ++ ++ /* Check EHCI or xHCI to virtual hub */ ++ if ((scu_usb & pdata->usb_mode_mask) == 0) { ++ pcie_device = syscon_regmap_lookup_by_phandle(dev->of_node, ++ "aspeed,device"); ++ if (IS_ERR(pcie_device)) { ++ dev_err(dev, "failed to find PCIe device regmap\n"); ++ return PTR_ERR(pcie_device); ++ } ++ if (pdata->g7_pcie == PCIE_XHCI) { ++ /* Check PCIe xHCI or BMC xHCI to virtual hub */ ++ if ((scu_usb & pdata->xhci_mode_mask) == 0) { ++ dev_info(dev, "PCIe xHCI to vhub\n"); ++ //EnPCIaMSI_EnPCIaIntA_EnPCIaMst_EnPCIaDev ++ /* Turn on PCIe xHCI without MSI */ ++ regmap_update_bits(pcie_device, 0x70, ++ BIT(19) | BIT(11) | BIT(3), ++ BIT(19) | BIT(11) | BIT(3)); ++ } ++ } else if (pdata->g7_pcie == PCIE_EHCI) { ++ dev_info(dev, "PCIe EHCI to vhub\n"); ++ //EnPCIaMSI_EnPCIaIntA_EnPCIaMst_EnPCIaDev ++ /* Turn on PCIe EHCI without MSI */ ++ regmap_update_bits(pcie_device, 0x70, ++ BIT(18) | BIT(10) | BIT(2), ++ BIT(18) | BIT(10) | BIT(2)); ++ } ++ } ++ return rc; ++} ++ + static void ast_vhub_remove(struct platform_device *pdev) + { + struct ast_vhub *vhub = platform_get_drvdata(pdev); +@@ -280,6 +345,9 @@ + if (vhub->clk) + clk_disable_unprepare(vhub->clk); + ++ if (vhub->rst) ++ reset_control_assert(vhub->rst); ++ + spin_unlock_irqrestore(&vhub->lock, flags); + + if (vhub->ep0_bufs) +@@ -291,6 +359,61 @@ + vhub->ep0_bufs = NULL; + } + ++static int ast_vhub_init_uart(struct device *dev, struct ast_vhub *vhub) ++{ ++ const struct device_node *np = dev->of_node; ++ void __iomem *regs = vhub->regs + 0x800; ++ int i, rc = 0; ++ int num_ports; ++ u32 ports[AST_VHUB_NUM_UART_PORTS], port; ++ u32 mode_sel = 0, dev_en = 0; ++ ++ num_ports = of_property_count_u32_elems(np, "aspeed,uart-ports"); ++ if (num_ports == -EINVAL) { ++ /* Property not found */ ++ return 0; ++ } ++ if (num_ports < 0) { ++ dev_err(dev, "Failed to read uart-ports property\n"); ++ return num_ports; ++ } ++ if (num_ports > AST_VHUB_NUM_UART_PORTS) { ++ dev_warn(dev, "Too many UART ports (%d), max is %d\n", ++ num_ports, AST_VHUB_NUM_UART_PORTS); ++ num_ports = AST_VHUB_NUM_UART_PORTS; ++ } ++ ++ rc = of_property_read_u32_array(np, "aspeed,uart-ports", ++ ports, num_ports); ++ if (rc) ++ return rc; ++ ++ dev_en = readl(regs + AST_VHUB_COM_EN_CTRL); ++ ++ for (i = 0; i < num_ports; i++) { ++ // io-die uart only ++ if (ports[i] == 4 || ports[i] > AST_VHUB_NUM_UART_PORTS) { ++ dev_warn(dev, "Ignoring invalid UART port %d\n", ++ ports[i]); ++ continue; ++ } ++ ++ if (ports[i] < 4) ++ port = ports[i]; ++ else ++ port = ports[i] - 1; ++ ++ mode_sel |= (0x2 << (port * 2)); ++ dev_en |= BIT(port + 16); ++ } ++ ++ dev_info(dev, "Enabled UART ports\n"); ++ ++ writel(mode_sel, regs + AST_VHUB_COM_MODE_SEL); ++ writel(dev_en, regs + AST_VHUB_COM_EN_CTRL); ++ return 0; ++} ++ + static int ast_vhub_probe(struct platform_device *pdev) + { + enum usb_device_speed max_speed; +@@ -298,11 +421,19 @@ + struct resource *res; + int i, rc = 0; + const struct device_node *np = pdev->dev.of_node; ++ const struct ast_vhub_match_data *pdata; ++ u32 val; + + vhub = devm_kzalloc(&pdev->dev, sizeof(*vhub), GFP_KERNEL); + if (!vhub) + return -ENOMEM; + ++ pdata = of_device_get_match_data(&pdev->dev); ++ if (!pdata) { ++ dev_err(&pdev->dev, "Couldn't get match data\n"); ++ return -ENODEV; ++ } ++ + rc = of_property_read_u32(np, "aspeed,vhub-downstream-ports", + &vhub->max_ports); + if (rc < 0) +@@ -337,6 +468,13 @@ + + platform_set_drvdata(pdev, vhub); + ++ 
vhub->rst = devm_reset_control_get_optional_shared(&pdev->dev, NULL); ++ ++ if (IS_ERR(vhub->rst)) { ++ rc = PTR_ERR(vhub->rst); ++ goto err; ++ } ++ + vhub->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(vhub->clk)) { + rc = PTR_ERR(vhub->clk); +@@ -348,6 +486,28 @@ + goto err; + } + ++ if (vhub->rst) { ++ mdelay(10); ++ rc = reset_control_deassert(vhub->rst); ++ if (rc) ++ goto err; ++ } ++ ++ if (pdata->g7_pcie != NOT_SUPPORTED) { ++ rc = ast_vhub_init_pcie(vhub, pdata); ++ if (rc) ++ goto err; ++ ++ /* For G7 PortA/B, enable the option of TXFIFO fix. ++ * It forces the CRC error for a re-try when vHub cannot fetch DRAM in time. ++ */ ++ val = readl(vhub->regs + pdata->txfifo_fix_reg); ++ writel(pdata->txfifo_fix_val | val, ++ vhub->regs + pdata->txfifo_fix_reg); ++ } ++ ++ ast_vhub_init_uart(&pdev->dev, vhub); ++ + /* Check if we need to limit the HW to USB1 */ + max_speed = usb_get_maximum_speed(&pdev->dev); + if (max_speed != USB_SPEED_UNKNOWN && max_speed < USB_SPEED_HIGH) +@@ -370,6 +530,12 @@ + goto err; + } + ++ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); ++ if (rc) { ++ dev_warn(&pdev->dev, "No suitable DMA available\n"); ++ goto err; ++ } ++ + /* + * Allocate DMA buffers for all EP0s in one chunk, + * one per port and one for the vHub itself +@@ -412,15 +578,86 @@ + return rc; + } + ++static const struct ast_vhub_match_data aspeed_vhub_match_data = { ++ .g7_pcie = NOT_SUPPORTED, ++}; ++ ++static const struct ast_vhub_match_data aspeed_g7_vhuba0_match_data = { ++ .g7_pcie = PCIE_EHCI, ++ .usb_mode_mask = GENMASK(25, 24), ++ .xhci_mode_mask = 0, ++ .txfifo_fix_reg = 0x800, ++ .txfifo_fix_val = BIT(13), ++}; ++ ++static const struct ast_vhub_match_data aspeed_g7_vhubb0_match_data = { ++ .g7_pcie = PCIE_EHCI, ++ .usb_mode_mask = GENMASK(29, 28), ++ .xhci_mode_mask = 0, ++ .txfifo_fix_reg = 0x800, ++ .txfifo_fix_val = BIT(13), ++}; ++ ++static const struct ast_vhub_match_data aspeed_g7_vhuba1_match_data = { ++ .g7_pcie = PCIE_XHCI, ++ .usb_mode_mask = GENMASK(3, 2), ++ .xhci_mode_mask = BIT_MASK(9), ++ .txfifo_fix_reg = 0x80C, ++ .txfifo_fix_val = BIT(31), ++}; ++ ++static const struct ast_vhub_match_data aspeed_g7_vhubb1_match_data = { ++ .g7_pcie = PCIE_XHCI, ++ .usb_mode_mask = GENMASK(7, 6), ++ .xhci_mode_mask = BIT_MASK(10), ++ .txfifo_fix_reg = 0x80C, ++ .txfifo_fix_val = BIT(31), ++}; ++ ++static const struct ast_vhub_match_data aspeed_g7_vhubc_match_data = { ++ .g7_pcie = NOT_SUPPORTED, ++}; ++ ++static const struct ast_vhub_match_data aspeed_g7_vhubd_match_data = { ++ .g7_pcie = NOT_SUPPORTED, ++}; ++ + static const struct of_device_id ast_vhub_dt_ids[] = { + { + .compatible = "aspeed,ast2400-usb-vhub", ++ .data = &aspeed_vhub_match_data, + }, + { + .compatible = "aspeed,ast2500-usb-vhub", ++ .data = &aspeed_vhub_match_data, + }, + { + .compatible = "aspeed,ast2600-usb-vhub", ++ .data = &aspeed_vhub_match_data, ++ }, ++ { ++ .compatible = "aspeed,ast2700-usb-vhuba0", ++ .data = &aspeed_g7_vhuba0_match_data, ++ }, ++ { ++ .compatible = "aspeed,ast2700-usb-vhubb0", ++ .data = &aspeed_g7_vhubb0_match_data, ++ }, ++ { ++ .compatible = "aspeed,ast2700-usb-vhuba1", ++ .data = &aspeed_g7_vhuba1_match_data, ++ }, ++ { ++ .compatible = "aspeed,ast2700-usb-vhubb1", ++ .data = &aspeed_g7_vhubb1_match_data, ++ }, ++ { ++ .compatible = "aspeed,ast2700-usb-vhubc", ++ .data = &aspeed_g7_vhubc_match_data, ++ }, ++ { ++ .compatible = "aspeed,ast2700-usb-vhubd", ++ .data = &aspeed_g7_vhubd_match_data, + }, + { } + }; +diff --git 
a/drivers/usb/gadget/udc/aspeed-vhub/dev.c b/drivers/usb/gadget/udc/aspeed-vhub/dev.c +--- a/drivers/usb/gadget/udc/aspeed-vhub/dev.c 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/usb/gadget/udc/aspeed-vhub/dev.c 2025-12-23 10:16:21.160032066 +0000 +@@ -116,10 +116,14 @@ + + if (wValue == USB_DEVICE_REMOTE_WAKEUP) { + d->wakeup_en = is_set; ++ val = readl(d->vhub->regs + AST_VHUB_CTRL); ++ if (is_set) ++ writel(val | VHUB_CTRL_AUTO_REMOTE_WAKEUP, ++ d->vhub->regs + AST_VHUB_CTRL); ++ + return std_req_complete; +- } + +- if (wValue == USB_DEVICE_TEST_MODE) { ++ } else if (wValue == USB_DEVICE_TEST_MODE) { + val = readl(d->vhub->regs + AST_VHUB_CTRL); + val &= ~GENMASK(10, 8); + val |= VHUB_CTRL_SET_TEST_MODE((wIndex >> 8) & 0x7); +@@ -239,7 +243,7 @@ + d->gadget.speed = ep->vhub->speed; + if (d->gadget.speed > d->driver->max_speed) + d->gadget.speed = d->driver->max_speed; +- DDBG(d, "fist packet, captured speed %d\n", ++ DDBG(d, "first packet, captured speed %d\n", + d->gadget.speed); + } + +diff --git a/drivers/usb/gadget/udc/aspeed-vhub/epn.c b/drivers/usb/gadget/udc/aspeed-vhub/epn.c +--- a/drivers/usb/gadget/udc/aspeed-vhub/epn.c 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/usb/gadget/udc/aspeed-vhub/epn.c 2025-12-23 10:16:21.160032066 +0000 +@@ -340,7 +340,9 @@ + struct ast_vhub *vhub = ep->vhub; + unsigned long flags; + bool empty; +- int rc; ++ int rc = 0; ++ ++ spin_lock_irqsave(&vhub->lock, flags); + + /* Paranoid checks */ + if (!u_req || !u_req->complete || !u_req->buf) { +@@ -349,14 +351,16 @@ + dev_warn(&vhub->pdev->dev, "complete=%p internal=%d\n", + u_req->complete, req->internal); + } +- return -EINVAL; ++ rc = -EINVAL; ++ goto out; + } + + /* Endpoint enabled ? */ + if (!ep->epn.enabled || !u_ep->desc || !ep->dev || !ep->d_idx || + !ep->dev->enabled) { + EPDBG(ep, "Enqueuing request on wrong or disabled EP\n"); +- return -ESHUTDOWN; ++ rc = -ESHUTDOWN; ++ goto out; + } + + /* Map request for DMA if possible. For now, the rule for DMA is +@@ -383,11 +387,16 @@ + if (rc) { + dev_warn(&vhub->pdev->dev, + "Request mapping failure %d\n", rc); +- return rc; ++ goto out; + } + } else + u_req->dma = 0; + ++ if (ep->dev->wakeup_en) { ++ EPVDBG(ep, "Wakeup host first\n"); ++ ast_vhub_hub_wake_all(vhub); ++ } ++ + EPVDBG(ep, "enqueue req @%p\n", req); + EPVDBG(ep, " l=%d dma=0x%x zero=%d noshort=%d noirq=%d is_in=%d\n", + u_req->length, (u32)u_req->dma, u_req->zero, +@@ -400,9 +409,8 @@ + req->act_count = 0; + req->active = false; + req->last_desc = -1; +- spin_lock_irqsave(&vhub->lock, flags); +- empty = list_empty(&ep->queue); + ++ empty = list_empty(&ep->queue); + /* Add request to list and kick processing if empty */ + list_add_tail(&req->queue, &ep->queue); + if (empty) { +@@ -411,9 +419,10 @@ + else + ast_vhub_epn_kick(ep, req); + } ++out: + spin_unlock_irqrestore(&vhub->lock, flags); + +- return 0; ++ return rc; + } + + static void ast_vhub_stop_active_req(struct ast_vhub_ep *ep, +@@ -786,6 +795,20 @@ + ep->dev = NULL; + } + ++static void ast_vhub_epn_flush(struct usb_ep *u_ep) ++{ ++ struct ast_vhub_ep *ep = to_ast_ep(u_ep); ++ struct ast_vhub *vhub = ep->vhub; ++ unsigned long flags; ++ ++ EPDBG(ep, "flushing !\n"); ++ ++ spin_lock_irqsave(&vhub->lock, flags); ++ /* This will clear out all the request of the endpoint and send requests done messages. 
*/ ++ ast_vhub_nuke(ep, -EINVAL); ++ spin_unlock_irqrestore(&vhub->lock, flags); ++} ++ + static const struct usb_ep_ops ast_vhub_epn_ops = { + .enable = ast_vhub_epn_enable, + .disable = ast_vhub_epn_disable, +@@ -796,6 +819,7 @@ + .set_wedge = ast_vhub_epn_set_wedge, + .alloc_request = ast_vhub_alloc_request, + .free_request = ast_vhub_free_request, ++ .fifo_flush = ast_vhub_epn_flush, + }; + + struct ast_vhub_ep *ast_vhub_alloc_epn(struct ast_vhub_dev *d, u8 addr) +diff --git a/drivers/usb/gadget/udc/aspeed-vhub/hub.c b/drivers/usb/gadget/udc/aspeed-vhub/hub.c +--- a/drivers/usb/gadget/udc/aspeed-vhub/hub.c 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/usb/gadget/udc/aspeed-vhub/hub.c 2025-12-23 10:16:21.160032066 +0000 +@@ -221,9 +221,8 @@ + EPDBG(ep, "Hub remote wakeup %s\n", + is_set ? "enabled" : "disabled"); + return std_req_complete; +- } + +- if (wValue == USB_DEVICE_TEST_MODE) { ++ } else if (wValue == USB_DEVICE_TEST_MODE) { + val = readl(ep->vhub->regs + AST_VHUB_CTRL); + val &= ~GENMASK(10, 8); + val |= VHUB_CTRL_SET_TEST_MODE((wIndex >> 8) & 0x7); +@@ -445,10 +444,9 @@ + + /* GET/SET_CONFIGURATION */ + case DeviceRequest | USB_REQ_GET_CONFIGURATION: +- return ast_vhub_simple_reply(ep, 1); ++ return ast_vhub_simple_reply(ep, vhub->current_config); + case DeviceOutRequest | USB_REQ_SET_CONFIGURATION: +- if (wValue != 1) +- return std_req_stall; ++ vhub->current_config = wValue; + return std_req_complete; + + /* GET_DESCRIPTOR */ +@@ -673,6 +671,9 @@ + ast_vhub_port_reset(vhub, port); + return std_req_complete; + case USB_PORT_FEAT_POWER: ++ ast_vhub_change_port_stat(vhub, port, ++ 0, USB_PORT_STAT_POWER, ++ false); + /* + * On Power-on, we mark the connected flag changed, + * if there's a connected device, some hosts will +@@ -750,9 +751,6 @@ + stat = vhub->ports[port].status; + chg = vhub->ports[port].change; + +- /* We always have power */ +- stat |= USB_PORT_STAT_POWER; +- + EPDBG(ep, " port status=%04x change=%04x\n", stat, chg); + + return ast_vhub_simple_reply(ep, +diff --git a/drivers/usb/gadget/udc/aspeed-vhub/vhub.h b/drivers/usb/gadget/udc/aspeed-vhub/vhub.h +--- a/drivers/usb/gadget/udc/aspeed-vhub/vhub.h 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/usb/gadget/udc/aspeed-vhub/vhub.h 2025-12-23 10:16:21.160032066 +0000 +@@ -199,6 +199,14 @@ + #define VHUB_DSC1_IN_SET_LEN(x) ((x) & 0xfff) + #define VHUB_DSC1_IN_LEN(x) ((x) & 0xfff) + ++/*********************************** ++ * * ++ * USB2COM register definitions * ++ * * ++ ***********************************/ ++#define AST_VHUB_COM_MODE_SEL 0x10 ++#define AST_VHUB_COM_EN_CTRL 0x1C ++ + /**************************************** + * * + * Data structures and misc definitions * +@@ -218,6 +226,8 @@ + * values are 256 and 32) + */ + ++#define AST_VHUB_NUM_UART_PORTS 15 /* USB2COM ports */ ++ + struct ast_vhub; + struct ast_vhub_dev; + +@@ -388,6 +398,8 @@ + spinlock_t lock; + struct work_struct wake_work; + struct clk *clk; ++ struct reset_control *rst; ++ + + /* EP0 DMA buffers allocated in one chunk */ + void *ep0_bufs; +@@ -419,6 +431,7 @@ + + /* Upstream bus speed captured at bus reset */ + unsigned int speed; ++ u8 current_config; + + /* Standard USB Descriptors of the vhub. 
*/ + struct usb_device_descriptor vhub_dev_desc; +diff --git a/drivers/usb/gadget/udc/aspeed_udc.c b/drivers/usb/gadget/udc/aspeed_udc.c +--- a/drivers/usb/gadget/udc/aspeed_udc.c 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/usb/gadget/udc/aspeed_udc.c 2025-12-23 10:16:21.115032820 +0000 +@@ -156,7 +156,7 @@ + #define AST_EP_DMA_DESC_PID_DATA1 (2 << 14) + #define AST_EP_DMA_DESC_PID_MDATA (3 << 14) + #define EP_DESC1_IN_LEN(x) ((x) & 0x1fff) +-#define AST_EP_DMA_DESC_MAX_LEN (7680) /* Max packet length for trasmit in 1 desc */ ++#define AST_EP_DMA_DESC_MAX_LEN (4096) /* Max packet length for trasmit in 1 desc */ + + struct ast_udc_request { + struct usb_request req; +@@ -278,6 +278,17 @@ + + /*-------------------------------------------------------------------------*/ + ++static inline void ast_udc_dma_workaround(void *addr) ++{ ++ /* ++ * The workaround consists of using a dummy read of the memory before ++ * doing the MMIO writes. This will ensure that the previous writes ++ * have been "pushed out". ++ */ ++ mb(); ++ (void)__raw_readl((void __iomem *)addr); ++} ++ + static void ast_udc_done(struct ast_udc_ep *ep, struct ast_udc_request *req, + int status) + { +@@ -368,7 +379,6 @@ + ep->desc = desc; + ep->stopped = 0; + ep->ep.maxpacket = maxpacket; +- ep->chunk_max = AST_EP_DMA_DESC_MAX_LEN; + + if (maxpacket < AST_UDC_EPn_MAX_PACKET) + ep_conf = EP_SET_MAX_PKT(maxpacket); +@@ -381,7 +391,18 @@ + if (!ep->dir_in) + ep_conf |= EP_DIR_OUT; + +- EP_DBG(ep, "type %d, dir_in %d\n", type, dir_in); ++ /* ++ * Large send function can send up to 8 packets from ++ * one descriptor with a limit of 4096 bytes. ++ */ ++ ep->chunk_max = ep->ep.maxpacket; ++ if (ep->dir_in) { ++ ep->chunk_max <<= 3; ++ while (ep->chunk_max > AST_EP_DMA_DESC_MAX_LEN) ++ ep->chunk_max -= ep->ep.maxpacket; ++ } ++ ++ EP_DBG(ep, "type %d, dir_in %d, chunk_max %d\n", type, dir_in, ep->chunk_max); + switch (type) { + case USB_ENDPOINT_XFER_ISOC: + ep_conf |= EP_SET_TYPE_MASK(EP_TYPE_ISO); +@@ -478,7 +499,7 @@ + struct device *dev = &udc->pdev->dev; + bool last = false; + int chunk, count; +- u32 offset; ++ u32 offset, size; + + if (!ep->descs) { + dev_warn(dev, "%s: Empty DMA descs list failure\n", +@@ -489,9 +510,9 @@ + chunk = tx_len; + offset = count = 0; + +- EP_DBG(ep, "req @%p, %s:%d, %s:0x%x, %s:0x%x\n", req, ++ EP_DBG(ep, "req @%p, %s:%d, %s:0x%x, %s:0x%x zero=%d\n", req, + "wptr", ep->descs_wptr, "dma_buf", dma_buf, +- "tx_len", tx_len); ++ "tx_len", tx_len, req->req.zero); + + /* Create Descriptor Lists */ + while (chunk >= 0 && !last && count < AST_UDC_DESCS_COUNT) { +@@ -499,13 +520,23 @@ + ep->descs[ep->descs_wptr].des_0 = dma_buf + offset; + + if (chunk > ep->chunk_max) { +- ep->descs[ep->descs_wptr].des_1 = ep->chunk_max; ++ size = ep->chunk_max; + } else { +- ep->descs[ep->descs_wptr].des_1 = chunk; +- last = true; ++ size = chunk; ++ /* ++ * Check if this is the last packet? 
++ * May go the loop again for the zero length packet ++ */ ++ if (!chunk || !req->req.zero || (chunk % ep->ep.maxpacket) != 0) ++ last = true; + } + +- chunk -= ep->chunk_max; ++ ep->descs[ep->descs_wptr].des_1 = size; ++ chunk -= size; ++ offset += size; ++ ++ if (last) ++ ast_udc_dma_workaround(&ep->descs[ep->descs_wptr]); + + EP_DBG(ep, "descs[%d]: 0x%x 0x%x\n", + ep->descs_wptr, +@@ -520,8 +551,6 @@ + + if (ep->descs_wptr >= AST_UDC_DESCS_COUNT) + ep->descs_wptr = 0; +- +- offset = ep->chunk_max * count; + } + + return 0; +@@ -538,6 +567,9 @@ + EP_DBG(ep, "kick req @%p, len:%d, dir:%d\n", + req, tx_len, ep->dir_in); + ++ if (ep->dir_in) ++ ast_udc_dma_workaround(req->req.buf + req->req.actual); ++ + ast_ep_write(ep, req->req.dma + req->req.actual, AST_UDC_EP_DMA_BUFF); + + /* Start DMA */ +@@ -549,11 +581,13 @@ + static void ast_udc_epn_kick_desc(struct ast_udc_ep *ep, + struct ast_udc_request *req) + { ++ u32 count; + u32 descs_max_size; + u32 tx_len; + u32 last; + +- descs_max_size = AST_EP_DMA_DESC_MAX_LEN * AST_UDC_DESCS_COUNT; ++ count = req->req.zero ? AST_UDC_DESCS_COUNT - 1 : AST_UDC_DESCS_COUNT; ++ descs_max_size = AST_EP_DMA_DESC_MAX_LEN * count; + + last = req->req.length - req->req.actual; + tx_len = last > descs_max_size ? descs_max_size : last; +@@ -1134,21 +1168,6 @@ + /* Ack interrupts */ + ast_udc_write(udc, isr, AST_UDC_ISR); + +- if (isr & UDC_IRQ_BUS_RESET) { +- ISR_DBG(udc, "UDC_IRQ_BUS_RESET\n"); +- udc->gadget.speed = USB_SPEED_UNKNOWN; +- +- ep = &udc->ep[1]; +- EP_DBG(ep, "dctrl:0x%x\n", +- ast_ep_read(ep, AST_UDC_EP_DMA_CTRL)); +- +- if (udc->driver && udc->driver->reset) { +- spin_unlock(&udc->lock); +- udc->driver->reset(&udc->gadget); +- spin_lock(&udc->lock); +- } +- } +- + if (isr & UDC_IRQ_BUS_SUSPEND) { + ISR_DBG(udc, "UDC_IRQ_BUS_SUSPEND\n"); + udc->suspended_from = udc->gadget.state; +@@ -1172,6 +1191,21 @@ + } + } + ++ if (isr & UDC_IRQ_BUS_RESET) { ++ ISR_DBG(udc, "UDC_IRQ_BUS_RESET\n"); ++ udc->gadget.speed = USB_SPEED_UNKNOWN; ++ ++ ep = &udc->ep[1]; ++ EP_DBG(ep, "dctrl:0x%x\n", ++ ast_ep_read(ep, AST_UDC_EP_DMA_CTRL)); ++ ++ if (udc->driver && udc->driver->reset) { ++ spin_unlock(&udc->lock); ++ udc->driver->reset(&udc->gadget); ++ spin_lock(&udc->lock); ++ } ++ } ++ + if (isr & UDC_IRQ_EP0_IN_ACK_STALL) { + ISR_DBG(udc, "UDC_IRQ_EP0_IN_ACK_STALL\n"); + ast_udc_ep0_in(udc); +@@ -1303,6 +1337,7 @@ + UDC_DBG(udc, "\n"); + udc->driver = driver; + udc->gadget.dev.of_node = udc->pdev->dev.of_node; ++ udc->gadget.dev.of_node_reused = true; + + for (i = 0; i < AST_UDC_NUM_ENDPOINTS; i++) { + ep = &udc->ep[i]; +diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c +--- a/drivers/watchdog/aspeed_wdt.c 2025-08-01 08:48:47.000000000 +0000 ++++ b/drivers/watchdog/aspeed_wdt.c 2025-12-23 10:16:21.077033457 +0000 +@@ -35,6 +35,8 @@ + u32 irq_shift; + u32 irq_mask; + struct aspeed_wdt_scu scu; ++ u32 num_reset_masks; ++ void (*wdt_writel)(u32 val, void __iomem *addr); + }; + + struct aspeed_wdt { +@@ -44,6 +46,9 @@ + const struct aspeed_wdt_config *cfg; + }; + ++static void wdt_writel_normal(u32 val, void __iomem *addr); ++static void wdt_writel_delay(u32 val, void __iomem *addr); ++ + static const struct aspeed_wdt_config ast2400_config = { + .ext_pulse_width_mask = 0xff, + .irq_shift = 0, +@@ -54,6 +59,7 @@ + .wdt_reset_mask = 0x1, + .wdt_reset_mask_shift = 1, + }, ++ .wdt_writel = wdt_writel_normal, + }; + + static const struct aspeed_wdt_config ast2500_config = { +@@ -66,6 +72,8 @@ + .wdt_reset_mask = 0x1, + .wdt_reset_mask_shift = 2, 
+ }, ++ .num_reset_masks = 1, ++ .wdt_writel = wdt_writel_normal, + }; + + static const struct aspeed_wdt_config ast2600_config = { +@@ -78,12 +86,29 @@ + .wdt_reset_mask = 0xf, + .wdt_reset_mask_shift = 16, + }, ++ .num_reset_masks = 2, ++ .wdt_writel = wdt_writel_normal, ++}; ++ ++static const struct aspeed_wdt_config ast2700_config = { ++ .ext_pulse_width_mask = 0xfffff, ++ .irq_shift = 0, ++ .irq_mask = GENMASK(31, 10), ++ .scu = { ++ .compatible = "aspeed,ast2700-scu0", ++ .reset_status_reg = 0x70, ++ .wdt_reset_mask = 0xf, ++ .wdt_reset_mask_shift = 0, ++ }, ++ .num_reset_masks = 5, ++ .wdt_writel = wdt_writel_delay, + }; + + static const struct of_device_id aspeed_wdt_of_table[] = { + { .compatible = "aspeed,ast2400-wdt", .data = &ast2400_config }, + { .compatible = "aspeed,ast2500-wdt", .data = &ast2500_config }, + { .compatible = "aspeed,ast2600-wdt", .data = &ast2600_config }, ++ { .compatible = "aspeed,ast2700-wdt", .data = &ast2700_config }, + { }, + }; + MODULE_DEVICE_TABLE(of, aspeed_wdt_of_table); +@@ -96,7 +121,8 @@ + #define WDT_CTRL_RESET_MODE_SOC (0x00 << 5) + #define WDT_CTRL_RESET_MODE_FULL_CHIP (0x01 << 5) + #define WDT_CTRL_RESET_MODE_ARM_CPU (0x10 << 5) +-#define WDT_CTRL_1MHZ_CLK BIT(4) ++#define WDT_CTRL_RST_SOC BIT(4) ++#define WDT_CTRL_1MHZ_CLK BIT(4) /* AST2400 only */ + #define WDT_CTRL_WDT_EXT BIT(3) + #define WDT_CTRL_WDT_INTR BIT(2) + #define WDT_CTRL_RESET_SYSTEM BIT(1) +@@ -148,6 +174,17 @@ + #define WDT_DEFAULT_TIMEOUT 30 + #define WDT_RATE_1MHZ 1000000 + ++static void wdt_writel_normal(u32 val, void __iomem *addr) ++{ ++ writel(val, addr); ++} ++ ++static void wdt_writel_delay(u32 val, void __iomem *addr) ++{ ++ writel(val, addr); ++ udelay(5); ++} ++ + static struct aspeed_wdt *to_aspeed_wdt(struct watchdog_device *wdd) + { + return container_of(wdd, struct aspeed_wdt, wdd); +@@ -157,10 +194,10 @@ + { + wdt->ctrl |= WDT_CTRL_ENABLE; + +- writel(0, wdt->base + WDT_CTRL); +- writel(count, wdt->base + WDT_RELOAD_VALUE); +- writel(WDT_RESTART_MAGIC, wdt->base + WDT_RESTART); +- writel(wdt->ctrl, wdt->base + WDT_CTRL); ++ wdt->cfg->wdt_writel(0, wdt->base + WDT_CTRL); ++ wdt->cfg->wdt_writel(count, wdt->base + WDT_RELOAD_VALUE); ++ wdt->cfg->wdt_writel(WDT_RESTART_MAGIC, wdt->base + WDT_RESTART); ++ wdt->cfg->wdt_writel(wdt->ctrl, wdt->base + WDT_CTRL); + } + + static int aspeed_wdt_start(struct watchdog_device *wdd) +@@ -177,7 +214,7 @@ + struct aspeed_wdt *wdt = to_aspeed_wdt(wdd); + + wdt->ctrl &= ~WDT_CTRL_ENABLE; +- writel(wdt->ctrl, wdt->base + WDT_CTRL); ++ wdt->cfg->wdt_writel(wdt->ctrl, wdt->base + WDT_CTRL); + + return 0; + } +@@ -186,7 +223,7 @@ + { + struct aspeed_wdt *wdt = to_aspeed_wdt(wdd); + +- writel(WDT_RESTART_MAGIC, wdt->base + WDT_RESTART); ++ wdt->cfg->wdt_writel(WDT_RESTART_MAGIC, wdt->base + WDT_RESTART); + + return 0; + } +@@ -201,8 +238,8 @@ + + actual = min(timeout, wdd->max_hw_heartbeat_ms / 1000); + +- writel(actual * WDT_RATE_1MHZ, wdt->base + WDT_RELOAD_VALUE); +- writel(WDT_RESTART_MAGIC, wdt->base + WDT_RESTART); ++ wdt->cfg->wdt_writel(actual * WDT_RATE_1MHZ, wdt->base + WDT_RELOAD_VALUE); ++ wdt->cfg->wdt_writel(WDT_RESTART_MAGIC, wdt->base + WDT_RESTART); + + return 0; + } +@@ -222,7 +259,7 @@ + else + wdt->ctrl &= ~WDT_CTRL_WDT_INTR; + +- writel(wdt->ctrl, wdt->base + WDT_CTRL); ++ wdt->cfg->wdt_writel(wdt->ctrl, wdt->base + WDT_CTRL); + + return 0; + } +@@ -375,8 +412,10 @@ + struct aspeed_wdt *wdt = to_aspeed_wdt(wdd); + u32 status = readl(wdt->base + WDT_TIMEOUT_STATUS); + +- if (status & WDT_TIMEOUT_STATUS_IRQ) ++ 
if (status & WDT_TIMEOUT_STATUS_IRQ) { + watchdog_notify_pretimeout(wdd); ++ wdt->cfg->wdt_writel(0x1, wdt->base + WDT_CLEAR_TIMEOUT_STATUS); ++ } + + return IRQ_HANDLED; + } +@@ -450,13 +489,16 @@ + ret = of_property_read_string(np, "aspeed,reset-type", &reset_type); + if (ret) { + wdt->ctrl |= WDT_CTRL_RESET_MODE_SOC | WDT_CTRL_RESET_SYSTEM; ++ if (!of_device_is_compatible(np, "aspeed,ast2400-wdt")) ++ wdt->ctrl |= WDT_CTRL_RST_SOC; + } else { + if (!strcmp(reset_type, "cpu")) + wdt->ctrl |= WDT_CTRL_RESET_MODE_ARM_CPU | + WDT_CTRL_RESET_SYSTEM; + else if (!strcmp(reset_type, "soc")) + wdt->ctrl |= WDT_CTRL_RESET_MODE_SOC | +- WDT_CTRL_RESET_SYSTEM; ++ WDT_CTRL_RESET_SYSTEM | ++ WDT_CTRL_RST_SOC; + else if (!strcmp(reset_type, "system")) + wdt->ctrl |= WDT_CTRL_RESET_MODE_FULL_CHIP | + WDT_CTRL_RESET_SYSTEM; +@@ -479,11 +521,11 @@ + set_bit(WDOG_HW_RUNNING, &wdt->wdd.status); + } + +- if ((of_device_is_compatible(np, "aspeed,ast2500-wdt")) || +- (of_device_is_compatible(np, "aspeed,ast2600-wdt"))) { +- u32 reset_mask[2]; +- size_t nrstmask = of_device_is_compatible(np, "aspeed,ast2600-wdt") ? 2 : 1; ++ if (!of_device_is_compatible(np, "aspeed,ast2400-wdt")) { ++ u32 reset_mask[5]; ++ size_t nrstmask = wdt->cfg->num_reset_masks; + u32 reg = readl(wdt->base + WDT_RESET_WIDTH); ++ int i; + + reg &= wdt->cfg->ext_pulse_width_mask; + if (of_property_read_bool(np, "aspeed,ext-active-high")) +@@ -491,7 +533,7 @@ + else + reg |= WDT_ACTIVE_LOW_MAGIC; + +- writel(reg, wdt->base + WDT_RESET_WIDTH); ++ wdt->cfg->wdt_writel(reg, wdt->base + WDT_RESET_WIDTH); + + reg &= wdt->cfg->ext_pulse_width_mask; + if (of_property_read_bool(np, "aspeed,ext-push-pull")) +@@ -499,13 +541,14 @@ + else + reg |= WDT_OPEN_DRAIN_MAGIC; + +- writel(reg, wdt->base + WDT_RESET_WIDTH); ++ wdt->cfg->wdt_writel(reg, wdt->base + WDT_RESET_WIDTH); + + ret = of_property_read_u32_array(np, "aspeed,reset-mask", reset_mask, nrstmask); + if (!ret) { +- writel(reset_mask[0], wdt->base + WDT_RESET_MASK1); +- if (nrstmask > 1) +- writel(reset_mask[1], wdt->base + WDT_RESET_MASK2); ++ for (i = 0; i < nrstmask; i++) { ++ wdt->cfg->wdt_writel(reset_mask[i], ++ wdt->base + WDT_RESET_MASK1 + i * 4); ++ } + } + } + +@@ -532,7 +575,7 @@ + * + * This implies a value of 0 gives a 1us pulse. + */ +- writel(duration - 1, wdt->base + WDT_RESET_WIDTH); ++ wdt->cfg->wdt_writel(duration - 1, wdt->base + WDT_RESET_WIDTH); + } + + aspeed_wdt_update_bootstatus(pdev, wdt); +diff --git a/include/dt-bindings/clock/aspeed,ast1700-clk.h b/include/dt-bindings/clock/aspeed,ast1700-clk.h +--- a/include/dt-bindings/clock/aspeed,ast1700-clk.h 1970-01-01 00:00:00.000000000 +0000 ++++ b/include/dt-bindings/clock/aspeed,ast1700-clk.h 2025-12-23 10:16:21.275030139 +0000 +@@ -0,0 +1,97 @@ ++/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ ++/* ++ * Device Tree binding constants for AST2700 clock controller. ++ * ++ * Copyright (c) 2023 Aspeed Technology Inc. 
++ */ ++ ++#ifndef __DT_BINDINGS_CLOCK_AST1700_H ++#define __DT_BINDINGS_CLOCK_AST1700_H ++ ++/* io die clk gate */ ++#define AST1700_CLK_GATE_LCLK0 (0) ++#define AST1700_CLK_GATE_LCLK1 (1) ++#define AST1700_CLK_GATE_ESPI0CLK (2) ++#define AST1700_CLK_GATE_ESPI1CLK (3) ++#define AST1700_CLK_GATE_SDCLK (4) ++#define AST1700_CLK_GATE_REFCLK (5) ++#define AST1700_CLK_GATE_RSV5CLK (6) ++#define AST1700_CLK_GATE_LPCHCLK (7) ++#define AST1700_CLK_GATE_MAC0CLK (8) ++#define AST1700_CLK_GATE_MAC1CLK (9) ++#define AST1700_CLK_GATE_MAC2CLK (10) ++#define AST1700_CLK_GATE_UART0CLK (11) ++#define AST1700_CLK_GATE_UART1CLK (12) ++#define AST1700_CLK_GATE_UART2CLK (13) ++#define AST1700_CLK_GATE_UART3CLK (14) ++/* reserved bit 15*/ ++#define AST1700_CLK_GATE_I3C0CLK (16) ++#define AST1700_CLK_GATE_I3C1CLK (17) ++#define AST1700_CLK_GATE_I3C2CLK (18) ++#define AST1700_CLK_GATE_I3C3CLK (19) ++#define AST1700_CLK_GATE_I3C4CLK (20) ++#define AST1700_CLK_GATE_I3C5CLK (21) ++#define AST1700_CLK_GATE_I3C6CLK (22) ++#define AST1700_CLK_GATE_I3C7CLK (23) ++#define AST1700_CLK_GATE_I3C8CLK (24) ++#define AST1700_CLK_GATE_I3C9CLK (25) ++#define AST1700_CLK_GATE_I3C10CLK (26) ++#define AST1700_CLK_GATE_I3C11CLK (27) ++#define AST1700_CLK_GATE_I3C12CLK (28) ++#define AST1700_CLK_GATE_I3C13CLK (29) ++#define AST1700_CLK_GATE_I3C14CLK (30) ++#define AST1700_CLK_GATE_I3C15CLK (31) ++ ++#define AST1700_CLK_GATE_UART5CLK (32 + 0) ++#define AST1700_CLK_GATE_UART6CLK (32 + 1) ++#define AST1700_CLK_GATE_UART7CLK (32 + 2) ++#define AST1700_CLK_GATE_UART8CLK (32 + 3) ++#define AST1700_CLK_GATE_UART9CLK (32 + 4) ++#define AST1700_CLK_GATE_UART10CLK (32 + 5) ++#define AST1700_CLK_GATE_UART11CLK (32 + 6) ++#define AST1700_CLK_GATE_UART12CLK (32 + 7) ++#define AST1700_CLK_GATE_FSICLK (32 + 8) ++#define AST1700_CLK_GATE_LTPIPHYCLK (32 + 9) ++#define AST1700_CLK_GATE_LTPICLK (32 + 10) ++#define AST1700_CLK_GATE_VGALCLK (32 + 11) ++#define AST1700_CLK_GATE_USBUARTCLK (32 + 12) ++#define AST1700_CLK_GATE_CANCLK (32 + 13) ++#define AST1700_CLK_GATE_PCICLK (32 + 14) ++#define AST1700_CLK_GATE_SLICLK (32 + 15) ++ ++#define AST1700_CLK_GATE_NUM (AST1700_CLK_GATE_SLICLK + 1) ++ ++/* io die clk */ ++#define AST1700_CLKIN (AST1700_CLK_GATE_NUM + 0) ++#define AST1700_CLK_HPLL (AST1700_CLK_GATE_NUM + 1) ++#define AST1700_CLK_APLL (AST1700_CLK_GATE_NUM + 2) ++#define AST1700_CLK_APLL_DIV2 (AST1700_CLK_GATE_NUM + 3) ++#define AST1700_CLK_APLL_DIV4 (AST1700_CLK_GATE_NUM + 4) ++#define AST1700_CLK_DPLL (AST1700_CLK_GATE_NUM + 5) ++#define AST1700_CLK_UXCLK (AST1700_CLK_GATE_NUM + 6) ++#define AST1700_CLK_HUXCLK (AST1700_CLK_GATE_NUM + 7) ++#define AST1700_CLK_UARTX (AST1700_CLK_GATE_NUM + 8) ++#define AST1700_CLK_HUARTX (AST1700_CLK_GATE_NUM + 9) ++#define AST1700_CLK_AHB (AST1700_CLK_GATE_NUM + 10) ++#define AST1700_CLK_APB (AST1700_CLK_GATE_NUM + 11) ++#define AST1700_CLK_UART0 (AST1700_CLK_GATE_NUM + 12) ++#define AST1700_CLK_UART1 (AST1700_CLK_GATE_NUM + 13) ++#define AST1700_CLK_UART2 (AST1700_CLK_GATE_NUM + 14) ++#define AST1700_CLK_UART3 (AST1700_CLK_GATE_NUM + 15) ++#define AST1700_CLK_UART5 (AST1700_CLK_GATE_NUM + 16) ++#define AST1700_CLK_UART6 (AST1700_CLK_GATE_NUM + 17) ++#define AST1700_CLK_UART7 (AST1700_CLK_GATE_NUM + 18) ++#define AST1700_CLK_UART8 (AST1700_CLK_GATE_NUM + 19) ++#define AST1700_CLK_UART9 (AST1700_CLK_GATE_NUM + 20) ++#define AST1700_CLK_UART10 (AST1700_CLK_GATE_NUM + 21) ++#define AST1700_CLK_UART11 (AST1700_CLK_GATE_NUM + 22) ++#define AST1700_CLK_UART12 (AST1700_CLK_GATE_NUM + 23) ++#define AST1700_CLK_HPLL_DIVN 
(AST1700_CLK_GATE_NUM + 24) ++#define AST1700_CLK_APLL_DIVN (AST1700_CLK_GATE_NUM + 25) ++#define AST1700_CLK_SDCLK (AST1700_CLK_GATE_NUM + 26) ++#define AST1700_CLK_RMII (AST1700_CLK_GATE_NUM + 27) ++#define AST1700_CLK_RGMII (AST1700_CLK_GATE_NUM + 28) ++#define AST1700_CLK_MACHCLK (AST1700_CLK_GATE_NUM + 29) ++ ++#define AST1700_NUM_CLKS (AST1700_CLK_MACHCLK + 1) ++#endif +diff --git a/include/dt-bindings/clock/aspeed,ast1800-clk.h b/include/dt-bindings/clock/aspeed,ast1800-clk.h +--- a/include/dt-bindings/clock/aspeed,ast1800-clk.h 1970-01-01 00:00:00.000000000 +0000 ++++ b/include/dt-bindings/clock/aspeed,ast1800-clk.h 2025-12-23 10:16:21.285029971 +0000 +@@ -0,0 +1,104 @@ ++/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ ++/* ++ * Device Tree binding constants for AST2700 clock controller. ++ * ++ * Copyright (c) 2023 Aspeed Technology Inc. ++ */ ++ ++#ifndef __DT_BINDINGS_CLOCK_AST1800_H ++#define __DT_BINDINGS_CLOCK_AST1800_H ++ ++#define AST1800_CLK_GATE_MCU (0) ++#define AST1800_CLK_GATE_CONFIG (1) ++#define AST1800_CLK_GATE_MEM (2) ++#define AST1800_CLK_GATE_LTPI (3) ++#define AST1800_CLK_GATE_SPI_SLAVE (4) ++#define AST1800_CLK_GATE_I2C_SLAVE (5) ++#define AST1800_CLK_GATE_SRAM (6) ++#define AST1800_CLK_GATE_UART_DBG (7) ++/* reserved bit 8 ~ 31 */ ++#define AST1800_CLK_GATE_I3C0 (32) ++#define AST1800_CLK_GATE_I3C1 (33) ++#define AST1800_CLK_GATE_I3C2 (34) ++#define AST1800_CLK_GATE_I3C3 (35) ++#define AST1800_CLK_GATE_I3C4 (36) ++#define AST1800_CLK_GATE_I3C5 (37) ++#define AST1800_CLK_GATE_I3C6 (38) ++#define AST1800_CLK_GATE_I3C7 (39) ++#define AST1800_CLK_GATE_I3C8 (40) ++#define AST1800_CLK_GATE_I3C9 (41) ++#define AST1800_CLK_GATE_I3C10 (42) ++#define AST1800_CLK_GATE_I3C11 (43) ++#define AST1800_CLK_GATE_I3C12 (44) ++#define AST1800_CLK_GATE_I3C13 (45) ++#define AST1800_CLK_GATE_I3C14 (46) ++#define AST1800_CLK_GATE_I3C15 (47) ++/* reserved bit 16 ~ 31 */ ++#define AST1800_CLK_GATE_EFPGA0 (64) ++#define AST1800_CLK_GATE_EFPGA1 (65) ++#define AST1800_CLK_GATE_EFPGA2 (66) ++#define AST1800_CLK_GATE_EFPGA3 (67) ++#define AST1800_CLK_GATE_EFPGA4 (68) ++#define AST1800_CLK_GATE_EFPGA5 (69) ++#define AST1800_CLK_GATE_EFPGA6 (70) ++#define AST1800_CLK_GATE_EFPGA7 (71) ++#define AST1800_CLK_GATE_EFPGA_DBG (72) ++ ++#define AST1800_CLKIN (AST1800_CLK_GATE_EFPGA_DBG + 0) ++#define AST1800_CLK_HPLL (AST1800_CLK_GATE_EFPGA_DBG + 1) ++#define AST1800_CLK_HPLL_DIV2 (AST1800_CLK_GATE_EFPGA_DBG + 2) ++#define AST1800_CLK_HPLL_DIV4 (AST1800_CLK_GATE_EFPGA_DBG + 3) ++#define AST1800_CLK_HPLL_DIV5 (AST1800_CLK_GATE_EFPGA_DBG + 4) ++#define AST1800_CLK_HPLL_DIV8 (AST1800_CLK_GATE_EFPGA_DBG + 5) ++#define AST1800_CLK_HPLL_DIV10 (AST1800_CLK_GATE_EFPGA_DBG + 6) ++#define AST1800_CLK_HPLL_DIV20 (AST1800_CLK_GATE_EFPGA_DBG + 7) ++#define AST1800_CLK_HPLL_DIV25 (AST1800_CLK_GATE_EFPGA_DBG + 8) ++#define AST1800_CLK_HPLL_DIV50 (AST1800_CLK_GATE_EFPGA_DBG + 9) ++#define AST1800_CLK_HPLL_DIV100 (AST1800_CLK_GATE_EFPGA_DBG + 10) ++#define AST1800_CLK_HPLL_DIV200 (AST1800_CLK_GATE_EFPGA_DBG + 11) ++#define AST1800_CLK_HPLL_DIV1000 (AST1800_CLK_GATE_EFPGA_DBG + 12) ++#define AST1800_CLK_HPLL_EFPGA0 (AST1800_CLK_GATE_EFPGA_DBG + 13) ++#define AST1800_CLK_HPLL_EFPGA1 (AST1800_CLK_GATE_EFPGA_DBG + 14) ++#define AST1800_CLK_HPLL_EFPGA2 (AST1800_CLK_GATE_EFPGA_DBG + 15) ++#define AST1800_CLK_HPLL_EFPGA3 (AST1800_CLK_GATE_EFPGA_DBG + 16) ++#define AST1800_CLK_HPLL_EFPGA4 (AST1800_CLK_GATE_EFPGA_DBG + 17) ++#define AST1800_CLK_HPLL_EFPGA5 (AST1800_CLK_GATE_EFPGA_DBG + 18) ++#define 
AST1800_CLK_HPLL_EFPGA6 (AST1800_CLK_GATE_EFPGA_DBG + 19) ++#define AST1800_CLK_HPLL_EFPGA7 (AST1800_CLK_GATE_EFPGA_DBG + 20) ++#define AST1800_CLK_EPLL (AST1800_CLK_GATE_EFPGA_DBG + 21) ++#define AST1800_CLK_EPLL_DIV2 (AST1800_CLK_GATE_EFPGA_DBG + 22) ++#define AST1800_CLK_EPLL_DIV4 (AST1800_CLK_GATE_EFPGA_DBG + 23) ++#define AST1800_CLK_EPLL_DIV8 (AST1800_CLK_GATE_EFPGA_DBG + 24) ++#define AST1800_CLK_EPLL_DIV12 (AST1800_CLK_GATE_EFPGA_DBG + 25) ++#define AST1800_CLK_EPLL_EFPGA0 (AST1800_CLK_GATE_EFPGA_DBG + 26) ++#define AST1800_CLK_EPLL_EFPGA1 (AST1800_CLK_GATE_EFPGA_DBG + 27) ++#define AST1800_CLK_EPLL_EFPGA2 (AST1800_CLK_GATE_EFPGA_DBG + 28) ++#define AST1800_CLK_EPLL_EFPGA3 (AST1800_CLK_GATE_EFPGA_DBG + 29) ++#define AST1800_CLK_EPLL_EFPGA4 (AST1800_CLK_GATE_EFPGA_DBG + 30) ++#define AST1800_CLK_EPLL_EFPGA5 (AST1800_CLK_GATE_EFPGA_DBG + 31) ++#define AST1800_CLK_EPLL_EFPGA6 (AST1800_CLK_GATE_EFPGA_DBG + 32) ++#define AST1800_CLK_EPLL_EFPGA7 (AST1800_CLK_GATE_EFPGA_DBG + 33) ++#define AST1800_CLK_EFPGA0 (AST1800_CLK_GATE_EFPGA_DBG + 34) ++#define AST1800_CLK_EFPGA1 (AST1800_CLK_GATE_EFPGA_DBG + 35) ++#define AST1800_CLK_EFPGA2 (AST1800_CLK_GATE_EFPGA_DBG + 36) ++#define AST1800_CLK_EFPGA3 (AST1800_CLK_GATE_EFPGA_DBG + 37) ++#define AST1800_CLK_EFPGA4 (AST1800_CLK_GATE_EFPGA_DBG + 38) ++#define AST1800_CLK_EFPGA5 (AST1800_CLK_GATE_EFPGA_DBG + 39) ++#define AST1800_CLK_EFPGA6 (AST1800_CLK_GATE_EFPGA_DBG + 40) ++#define AST1800_CLK_EFPGA7 (AST1800_CLK_GATE_EFPGA_DBG + 41) ++#define AST1800_CLK_LPLL (AST1800_CLK_GATE_EFPGA_DBG + 42) ++#define AST1800_CLK_LPLL_DIV2 (AST1800_CLK_GATE_EFPGA_DBG + 43) ++#define AST1800_CLK_LPLL_DIV4 (AST1800_CLK_GATE_EFPGA_DBG + 44) ++#define AST1800_CLK_LPLL_DIV8 (AST1800_CLK_GATE_EFPGA_DBG + 45) ++#define AST1800_CLK_UXCLK (AST1800_CLK_GATE_EFPGA_DBG + 46) ++#define AST1800_CLK_HUXCLK (AST1800_CLK_GATE_EFPGA_DBG + 47) ++#define AST1800_CLK_UARTX (AST1800_CLK_GATE_EFPGA_DBG + 48) ++#define AST1800_CLK_HUARTX (AST1800_CLK_GATE_EFPGA_DBG + 49) ++#define AST1800_CLK_AHB (AST1800_CLK_GATE_EFPGA_DBG + 50) ++#define AST1800_CLK_APB (AST1800_CLK_GATE_EFPGA_DBG + 51) ++#define AST1800_CLK_UART (AST1800_CLK_GATE_EFPGA_DBG + 52) ++#define AST1800_CLK_I3C (AST1800_CLK_GATE_EFPGA_DBG + 53) ++ ++#define AST1800_NUM_CLKS (AST1800_CLK_I3C + 1) ++ ++#endif +diff --git a/include/dt-bindings/clock/aspeed,ast2700-scu.h b/include/dt-bindings/clock/aspeed,ast2700-scu.h +--- a/include/dt-bindings/clock/aspeed,ast2700-scu.h 1970-01-01 00:00:00.000000000 +0000 ++++ b/include/dt-bindings/clock/aspeed,ast2700-scu.h 2025-12-23 10:16:21.289029904 +0000 +@@ -0,0 +1,167 @@ ++/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ ++/* ++ * Device Tree binding constants for AST2700 clock controller. ++ * ++ * Copyright (c) 2024 Aspeed Technology Inc. 
++ */ ++ ++#ifndef __DT_BINDINGS_CLOCK_AST2700_H ++#define __DT_BINDINGS_CLOCK_AST2700_H ++ ++/* SOC0 clk */ ++#define SCU0_CLKIN 0 ++#define SCU0_CLK_24M 1 ++#define SCU0_CLK_192M 2 ++#define SCU0_CLK_UART 3 ++#define SCU0_CLK_UART_DIV13 3 ++#define SCU0_CLK_PSP 4 ++#define SCU0_CLK_HPLL 5 ++#define SCU0_CLK_HPLL_DIV2 6 ++#define SCU0_CLK_HPLL_DIV4 7 ++#define SCU0_CLK_HPLL_DIV_AHB 8 ++#define SCU0_CLK_DPLL 9 ++#define SCU0_CLK_MPLL 10 ++#define SCU0_CLK_MPLL_DIV2 11 ++#define SCU0_CLK_MPLL_DIV4 12 ++#define SCU0_CLK_MPLL_DIV8 13 ++#define SCU0_CLK_MPLL_DIV_AHB 14 ++#define SCU0_CLK_D0 15 ++#define SCU0_CLK_D1 16 ++#define SCU0_CLK_CRT0 17 ++#define SCU0_CLK_CRT1 18 ++#define SCU0_CLK_MPHY 19 ++#define SCU0_CLK_AXI0 20 ++#define SCU0_CLK_AXI1 21 ++#define SCU0_CLK_AHB 22 ++#define SCU0_CLK_APB 23 ++#define SCU0_CLK_UART4 24 ++#define SCU0_CLK_EMMCMUX 25 ++#define SCU0_CLK_EMMC 26 ++#define SCU0_CLK_U2PHY_CLK12M 27 ++#define SCU0_CLK_U2PHY_REFCLK 28 ++ ++/* SOC0 clk-gate */ ++#define SCU0_CLK_GATE_MCLK 29 ++#define SCU0_CLK_GATE_ECLK 30 ++#define SCU0_CLK_GATE_2DCLK 31 ++#define SCU0_CLK_GATE_VCLK 32 ++#define SCU0_CLK_GATE_BCLK 33 ++#define SCU0_CLK_GATE_VGA0CLK 34 ++#define SCU0_CLK_GATE_REFCLK 35 ++#define SCU0_CLK_GATE_PORTBUSB2CLK 36 ++#define SCU0_CLK_GATE_UHCICLK 37 ++#define SCU0_CLK_GATE_VGA1CLK 38 ++#define SCU0_CLK_GATE_DDRPHYCLK 39 ++#define SCU0_CLK_GATE_E2M0CLK 40 ++#define SCU0_CLK_GATE_HACCLK 41 ++#define SCU0_CLK_GATE_PORTAUSB2CLK 42 ++#define SCU0_CLK_GATE_UART4CLK 43 ++#define SCU0_CLK_GATE_SLICLK 44 ++#define SCU0_CLK_GATE_DACCLK 45 ++#define SCU0_CLK_GATE_DP 46 ++#define SCU0_CLK_GATE_E2M1CLK 47 ++#define SCU0_CLK_GATE_CRT0CLK 48 ++#define SCU0_CLK_GATE_CRT1CLK 49 ++#define SCU0_CLK_GATE_ECDSACLK 50 ++#define SCU0_CLK_GATE_RSACLK 51 ++#define SCU0_CLK_GATE_RVAS0CLK 52 ++#define SCU0_CLK_GATE_UFSCLK 53 ++#define SCU0_CLK_GATE_EMMCCLK 54 ++#define SCU0_CLK_GATE_RVAS1CLK 55 ++#define SCU0_CLK_U2PHY_REFCLKSRC 56 ++#define SCU0_CLK_AHBMUX 57 ++#define SCU0_CLK_MPHYSRC 58 ++ ++/* SOC1 clk */ ++#define SCU1_CLKIN 0 ++#define SCU1_CLK_HPLL 1 ++#define SCU1_CLK_APLL 2 ++#define SCU1_CLK_APLL_DIV2 3 ++#define SCU1_CLK_APLL_DIV4 4 ++#define SCU1_CLK_DPLL 5 ++#define SCU1_CLK_UXCLK 6 ++#define SCU1_CLK_HUXCLK 7 ++#define SCU1_CLK_UARTX 8 ++#define SCU1_CLK_HUARTX 9 ++#define SCU1_CLK_AHB 10 ++#define SCU1_CLK_APB 11 ++#define SCU1_CLK_UART0 12 ++#define SCU1_CLK_UART1 13 ++#define SCU1_CLK_UART2 14 ++#define SCU1_CLK_UART3 15 ++#define SCU1_CLK_UART5 16 ++#define SCU1_CLK_UART6 17 ++#define SCU1_CLK_UART7 18 ++#define SCU1_CLK_UART8 19 ++#define SCU1_CLK_UART9 20 ++#define SCU1_CLK_UART10 21 ++#define SCU1_CLK_UART11 22 ++#define SCU1_CLK_UART12 23 ++#define SCU1_CLK_UART13 24 ++#define SCU1_CLK_UART14 25 ++#define SCU1_CLK_APLL_DIVN 26 ++#define SCU1_CLK_SDMUX 27 ++#define SCU1_CLK_SDCLK 28 ++#define SCU1_CLK_RMII 29 ++#define SCU1_CLK_RGMII 30 ++#define SCU1_CLK_MACHCLK 31 ++#define SCU1_CLK_MAC0RCLK 32 ++#define SCU1_CLK_MAC1RCLK 33 ++#define SCU1_CLK_CAN 34 ++ ++/* SOC1 clk gate */ ++#define SCU1_CLK_GATE_LCLK0 35 ++#define SCU1_CLK_GATE_LCLK1 36 ++#define SCU1_CLK_GATE_ESPI0CLK 37 ++#define SCU1_CLK_GATE_ESPI1CLK 38 ++#define SCU1_CLK_GATE_SDCLK 39 ++#define SCU1_CLK_GATE_IPEREFCLK 40 ++#define SCU1_CLK_GATE_REFCLK 41 ++#define SCU1_CLK_GATE_LPCHCLK 42 ++#define SCU1_CLK_GATE_MAC0CLK 43 ++#define SCU1_CLK_GATE_MAC1CLK 44 ++#define SCU1_CLK_GATE_MAC2CLK 45 ++#define SCU1_CLK_GATE_UART0CLK 46 ++#define SCU1_CLK_GATE_UART1CLK 47 ++#define SCU1_CLK_GATE_UART2CLK 48 ++#define 
SCU1_CLK_GATE_UART3CLK 49 ++#define SCU1_CLK_GATE_I2CCLK 50 ++#define SCU1_CLK_GATE_I3C0CLK 51 ++#define SCU1_CLK_GATE_I3C1CLK 52 ++#define SCU1_CLK_GATE_I3C2CLK 53 ++#define SCU1_CLK_GATE_I3C3CLK 54 ++#define SCU1_CLK_GATE_I3C4CLK 55 ++#define SCU1_CLK_GATE_I3C5CLK 56 ++#define SCU1_CLK_GATE_I3C6CLK 57 ++#define SCU1_CLK_GATE_I3C7CLK 58 ++#define SCU1_CLK_GATE_I3C8CLK 59 ++#define SCU1_CLK_GATE_I3C9CLK 60 ++#define SCU1_CLK_GATE_I3C10CLK 61 ++#define SCU1_CLK_GATE_I3C11CLK 62 ++#define SCU1_CLK_GATE_I3C12CLK 63 ++#define SCU1_CLK_GATE_I3C13CLK 64 ++#define SCU1_CLK_GATE_I3C14CLK 65 ++#define SCU1_CLK_GATE_I3C15CLK 66 ++#define SCU1_CLK_GATE_UART5CLK 67 ++#define SCU1_CLK_GATE_UART6CLK 68 ++#define SCU1_CLK_GATE_UART7CLK 69 ++#define SCU1_CLK_GATE_UART8CLK 70 ++#define SCU1_CLK_GATE_UART9CLK 71 ++#define SCU1_CLK_GATE_UART10CLK 72 ++#define SCU1_CLK_GATE_UART11CLK 73 ++#define SCU1_CLK_GATE_UART12CLK 74 ++#define SCU1_CLK_GATE_FSICLK 75 ++#define SCU1_CLK_GATE_LTPIPHYCLK 76 ++#define SCU1_CLK_GATE_LTPICLK 77 ++#define SCU1_CLK_GATE_VGALCLK 78 ++#define SCU1_CLK_GATE_UHCICLK 79 ++#define SCU1_CLK_GATE_CANCLK 80 ++#define SCU1_CLK_GATE_PCICLK 81 ++#define SCU1_CLK_GATE_SLICLK 82 ++#define SCU1_CLK_GATE_E2MCLK 83 ++#define SCU1_CLK_GATE_PORTCUSB2CLK 84 ++#define SCU1_CLK_GATE_PORTDUSB2CLK 85 ++#define SCU1_CLK_GATE_LTPI1TXCLK 86 ++#define SCU1_CLK_I3C 87 ++ ++#endif +diff --git a/include/dt-bindings/clock/aspeed-clock.h b/include/dt-bindings/clock/aspeed-clock.h +--- a/include/dt-bindings/clock/aspeed-clock.h 2025-08-01 08:48:47.000000000 +0000 ++++ b/include/dt-bindings/clock/aspeed-clock.h 2025-12-23 10:16:21.280030055 +0000 +@@ -53,5 +53,6 @@ + #define ASPEED_RESET_AHB 8 + #define ASPEED_RESET_CRT1 9 + #define ASPEED_RESET_HACE 10 ++#define ASPEED_RESET_VIDEO 21 + + #endif +diff --git a/include/dt-bindings/clock/ast2600-clock.h b/include/dt-bindings/clock/ast2600-clock.h +--- a/include/dt-bindings/clock/ast2600-clock.h 2025-08-01 08:48:47.000000000 +0000 ++++ b/include/dt-bindings/clock/ast2600-clock.h 2025-12-23 10:16:21.294029820 +0000 +@@ -72,7 +72,7 @@ + #define ASPEED_CLK_D1CLK 55 + #define ASPEED_CLK_VCLK 56 + #define ASPEED_CLK_LHCLK 57 +-#define ASPEED_CLK_UART 58 ++#define ASPEED_CLK_UART5 58 + #define ASPEED_CLK_UARTX 59 + #define ASPEED_CLK_SDIO 60 + #define ASPEED_CLK_EMMC 61 +@@ -87,8 +87,12 @@ + #define ASPEED_CLK_MAC4RCLK 70 + #define ASPEED_CLK_I3C 71 + #define ASPEED_CLK_FSI 72 ++#define ASPEED_CLK_HUARTX 73 ++#define ASPEED_CLK_UXCLK 74 ++#define ASPEED_CLK_HUXCLK 75 + + /* Only list resets here that are not part of a clock gate + reset pair */ ++#define ASPEED_RESET_ESPI 57 + #define ASPEED_RESET_ADC 55 + #define ASPEED_RESET_JTAG_MASTER2 54 + +@@ -118,10 +122,16 @@ + #define ASPEED_RESET_DEV_MCTP 24 + #define ASPEED_RESET_RC_MCTP 23 + #define ASPEED_RESET_JTAG_MASTER 22 +-#define ASPEED_RESET_PCIE_DEV_O 21 +-#define ASPEED_RESET_PCIE_DEV_OEN 20 +-#define ASPEED_RESET_PCIE_RC_O 19 +-#define ASPEED_RESET_PCIE_RC_OEN 18 ++#define ASPEED_RESET_PCIE_DEV_OE 21 ++#define ASPEED_RESET_PCIE_DEV_O 20 ++#define ASPEED_RESET_PCIE_RC_OE 19 ++#define ASPEED_RESET_PCIE_RC_O 18 ++#define ASPEED_RESET_EMMC 16 ++#define ASPEED_RESET_CRT 13 ++#define ASPEED_RESET_MAC2 12 ++#define ASPEED_RESET_MAC1 11 ++#define ASPEED_RESET_RVAS 9 ++#define ASPEED_RESET_VIDEO 6 + #define ASPEED_RESET_PCI_DP 5 + #define ASPEED_RESET_HACE 4 + #define ASPEED_RESET_AHB 1 +diff --git a/include/dt-bindings/interrupt-controller/aspeed-e2m-ic.h b/include/dt-bindings/interrupt-controller/aspeed-e2m-ic.h +--- 
a/include/dt-bindings/interrupt-controller/aspeed-e2m-ic.h 1970-01-01 00:00:00.000000000 +0000 ++++ b/include/dt-bindings/interrupt-controller/aspeed-e2m-ic.h 2025-12-23 10:16:21.299029736 +0000 +@@ -0,0 +1,21 @@ ++/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ ++/* ++ * Device Tree binding constants for AST2700 E2M interrupt ++ * controller. ++ * ++ * Copyright (c) 2024 Aspeed Technology Inc. ++ */ ++ ++#ifndef _DT_BINDINGS_INTERRUPT_CONTROLLER_ASPEED_E2M_IC_H_ ++#define _DT_BINDINGS_INTERRUPT_CONTROLLER_ASPEED_E2M_IC_H_ ++ ++#define ASPEED_AST2700_E2M_MMBI_H2B_INT0 0 ++#define ASPEED_AST2700_E2M_MMBI_H2B_INT1 1 ++#define ASPEED_AST2700_E2M_MMBI_H2B_INT2 2 ++#define ASPEED_AST2700_E2M_MMBI_H2B_INT3 3 ++#define ASPEED_AST2700_E2M_MMBI_H2B_INT4 4 ++#define ASPEED_AST2700_E2M_MMBI_H2B_INT5 5 ++#define ASPEED_AST2700_E2M_MMBI_H2B_INT6 6 ++#define ASPEED_AST2700_E2M_MMBI_H2B_INT7 7 ++ ++#endif /* _DT_BINDINGS_INTERRUPT_CONTROLLER_ASPEED_E2M_IC_H_ */ +diff --git a/include/dt-bindings/interrupt-controller/aspeed-scu-ic.h b/include/dt-bindings/interrupt-controller/aspeed-scu-ic.h +--- a/include/dt-bindings/interrupt-controller/aspeed-scu-ic.h 2025-08-01 08:48:47.000000000 +0000 ++++ b/include/dt-bindings/interrupt-controller/aspeed-scu-ic.h 2025-12-23 10:16:21.304029653 +0000 +@@ -20,4 +20,18 @@ + #define ASPEED_AST2600_SCU_IC1_LPC_RESET_LO_TO_HI 0 + #define ASPEED_AST2600_SCU_IC1_LPC_RESET_HI_TO_LO 1 + ++#define ASPEED_AST2700_SCU_IC0_PCIE_PERST_LO_TO_HI 3 ++#define ASPEED_AST2700_SCU_IC0_PCIE_PERST_HI_TO_LO 2 ++ ++#define ASPEED_AST2700_SCU_IC1_PCIE_RCRST_LO_TO_HI 3 ++#define ASPEED_AST2700_SCU_IC1_PCIE_RCRST_HI_TO_LO 2 ++ ++#define ASPEED_AST2700_SCU_IC2_PCIE_PERST_LO_TO_HI 3 ++#define ASPEED_AST2700_SCU_IC2_PCIE_PERST_HI_TO_LO 2 ++#define ASPEED_AST2700_SCU_IC2_LPC_RESET_LO_TO_HI 1 ++#define ASPEED_AST2700_SCU_IC2_LPC_RESET_HI_TO_LO 0 ++ ++#define ASPEED_AST2700_SCU_IC3_LPC_RESET_LO_TO_HI 1 ++#define ASPEED_AST2700_SCU_IC3_LPC_RESET_HI_TO_LO 0 ++ + #endif /* _DT_BINDINGS_INTERRUPT_CONTROLLER_ASPEED_SCU_IC_H_ */ +diff --git a/include/dt-bindings/reset/aspeed,ast1700-reset.h b/include/dt-bindings/reset/aspeed,ast1700-reset.h +--- a/include/dt-bindings/reset/aspeed,ast1700-reset.h 1970-01-01 00:00:00.000000000 +0000 ++++ b/include/dt-bindings/reset/aspeed,ast1700-reset.h 2025-12-23 10:16:21.317029435 +0000 +@@ -0,0 +1,69 @@ ++/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ ++/* ++ * Device Tree binding constants for AST2700 reset controller. ++ * ++ * Copyright (c) 2023 Aspeed Technology Inc. 
++ */ ++ ++#ifndef _MACH_ASPEED_AST1700_RESET_H_ ++#define _MACH_ASPEED_AST1700_RESET_H_ ++ ++#define AST1700_RESET_LPC0 (0) ++#define AST1700_RESET_LPC1 (1) ++#define AST1700_RESET_MII (2) ++#define AST1700_RESET_PECI (3) ++#define AST1700_RESET_PWM (4) ++#define AST1700_RESET_MAC0 (5) ++#define AST1700_RESET_MAC1 (6) ++#define AST1700_RESET_MAC2 (7) ++#define AST1700_RESET_ADC (8) ++#define AST1700_RESET_SD (9) ++#define AST1700_RESET_ESPI0 (10) ++#define AST1700_RESET_ESPI1 (11) ++#define AST1700_RESET_JTAG1 (12) ++#define AST1700_RESET_SPI0 (13) ++#define AST1700_RESET_SPI1 (14) ++#define AST1700_RESET_SPI2 (15) ++#define AST1700_RESET_I3C0 (16) ++#define AST1700_RESET_I3C1 (17) ++#define AST1700_RESET_I3C2 (18) ++#define AST1700_RESET_I3C3 (19) ++#define AST1700_RESET_I3C4 (20) ++#define AST1700_RESET_I3C5 (21) ++#define AST1700_RESET_I3C6 (22) ++#define AST1700_RESET_I3C7 (23) ++#define AST1700_RESET_I3C8 (24) ++#define AST1700_RESET_I3C9 (25) ++#define AST1700_RESET_I3C10 (26) ++#define AST1700_RESET_I3C11 (27) ++#define AST1700_RESET_I3C12 (28) ++#define AST1700_RESET_I3C13 (29) ++#define AST1700_RESET_I3C14 (30) ++#define AST1700_RESET_I3C15 (31) ++/* reserved 32 */ ++#define AST1700_RESET_IOMCU (33) ++#define AST1700_RESET_H2A_SPI1 (34) ++#define AST1700_RESET_H2A_SPI2 (35) ++#define AST1700_RESET_UART0 (36) ++#define AST1700_RESET_UART1 (37) ++#define AST1700_RESET_UART2 (38) ++#define AST1700_RESET_UART3 (39) ++#define AST1700_RESET_I2C_FILTER (40) ++#define AST1700_RESET_CALIPTRA (41) ++/* reserved 42:43 */ ++#define AST1700_RESET_FSI (44) ++#define AST1700_RESET_CAN (45) ++#define AST1700_RESET_MCTP (46) ++#define AST1700_RESET_I2C (47) ++#define AST1700_RESET_UART6 (48) ++#define AST1700_RESET_UART7 (49) ++#define AST1700_RESET_UART8 (50) ++#define AST1700_RESET_UART9 (51) ++#define AST1700_RESET_LTPI (52) ++#define AST1700_RESET_VGAL (53) ++/* reserved 54:62 */ ++#define AST1700_RESET_I3CDMA (63) ++ ++#define AST1700_RESET_NUMS (AST1700_RESET_I3CDMA + 1) ++ ++#endif /* _MACH_ASPEED_AST1700_RESET_H_ */ +diff --git a/include/dt-bindings/reset/aspeed,ast1800-reset.h b/include/dt-bindings/reset/aspeed,ast1800-reset.h +--- a/include/dt-bindings/reset/aspeed,ast1800-reset.h 1970-01-01 00:00:00.000000000 +0000 ++++ b/include/dt-bindings/reset/aspeed,ast1800-reset.h 2025-12-23 10:16:21.326029284 +0000 +@@ -0,0 +1,61 @@ ++/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ ++/* ++ * Device Tree binding constants for AST1800 reset controller. ++ * ++ * Copyright (c) 2025 Aspeed Technology Inc. 
++ */ ++ ++#ifndef _MACH_ASPEED_AST1800_RESET_H_ ++#define _MACH_ASPEED_AST1800_RESET_H_ ++ ++#define AST1800_RESET_CONFIG (0) ++#define AST1800_RESET_MEM (1) ++#define AST1800_RESET_LTPI (2) ++#define AST1800_RESET_EFPGA_DBG (3) ++#define AST1800_RESET_SPI_SLAVE (4) ++#define AST1800_RESET_I2C_SLAVE (5) ++/* reserved 6 7 */ ++#define AST1800_RESET_I2C_MASTER (8) ++#define AST1800_RESET_I3C_DMA (9) ++#define AST1800_RESET_ADC (10) ++#define AST1800_RESET_GPIO (11) ++#define AST1800_RESET_JTAG (12) ++#define AST1800_RESET_PWM (13) ++#define AST1800_RESET_UART_DBG (14) ++/* reserved 15~31 */ ++#define AST1800_RESET_I3C0 (32) ++#define AST1800_RESET_I3C1 (33) ++#define AST1800_RESET_I3C2 (34) ++#define AST1800_RESET_I3C3 (35) ++#define AST1800_RESET_I3C4 (36) ++#define AST1800_RESET_I3C5 (37) ++#define AST1800_RESET_I3C6 (38) ++#define AST1800_RESET_I3C7 (39) ++#define AST1800_RESET_I3C8 (40) ++#define AST1800_RESET_I3C9 (41) ++#define AST1800_RESET_I3C10 (42) ++#define AST1800_RESET_I3C11 (43) ++#define AST1800_RESET_I3C12 (44) ++#define AST1800_RESET_I3C13 (45) ++#define AST1800_RESET_I3C14 (46) ++#define AST1800_RESET_I3C15 (47) ++#define AST1800_RESET_I2C0 (48) ++#define AST1800_RESET_I2C1 (49) ++#define AST1800_RESET_I2C2 (50) ++#define AST1800_RESET_I2C3 (51) ++#define AST1800_RESET_I2C4 (52) ++#define AST1800_RESET_I2C5 (53) ++#define AST1800_RESET_I2C6 (54) ++#define AST1800_RESET_I2C7 (55) ++#define AST1800_RESET_I2C8 (56) ++#define AST1800_RESET_I2C9 (57) ++#define AST1800_RESET_I2C10 (58) ++#define AST1800_RESET_I2C11 (59) ++#define AST1800_RESET_I2C12 (60) ++#define AST1800_RESET_I2C13 (61) ++#define AST1800_RESET_I2C14 (62) ++#define AST1800_RESET_I2C15 (63) ++ ++#define AST1800_RESET_NUMS (AST1800_RESET_I2C15 + 1) ++ ++#endif /* _MACH_ASPEED_AST1800_RESET_H_ */ +diff --git a/include/dt-bindings/reset/aspeed,ast2700-scu.h b/include/dt-bindings/reset/aspeed,ast2700-scu.h +--- a/include/dt-bindings/reset/aspeed,ast2700-scu.h 1970-01-01 00:00:00.000000000 +0000 ++++ b/include/dt-bindings/reset/aspeed,ast2700-scu.h 2025-12-23 10:16:21.322029351 +0000 +@@ -0,0 +1,125 @@ ++/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ ++/* ++ * Device Tree binding constants for AST2700 reset controller. ++ * ++ * Copyright (c) 2024 Aspeed Technology Inc. 
++ */ ++ ++#ifndef _MACH_ASPEED_AST2700_RESET_H_ ++#define _MACH_ASPEED_AST2700_RESET_H_ ++ ++/* SOC0 */ ++#define SCU0_RESET_SDRAM 0 ++#define SCU0_RESET_DDRPHY 1 ++#define SCU0_RESET_RSA 2 ++#define SCU0_RESET_SHA3 3 ++#define SCU0_RESET_HACE 4 ++#define SCU0_RESET_SOC 5 ++#define SCU0_RESET_VIDEO 6 ++#define SCU0_RESET_2D 7 ++#define SCU0_RESET_PCIS 8 ++#define SCU0_RESET_RVAS0 9 ++#define SCU0_RESET_RVAS1 10 ++#define SCU0_RESET_SM3 11 ++#define SCU0_RESET_SM4 12 ++#define SCU0_RESET_CRT0 13 ++#define SCU0_RESET_ECC 14 ++#define SCU0_RESET_DP_PCI 15 ++#define SCU0_RESET_UFS 16 ++#define SCU0_RESET_EMMC 17 ++#define SCU0_RESET_PCIE1RST 18 ++#define SCU0_RESET_PCIE1RSTOE 19 ++#define SCU0_RESET_PCIE0RST 20 ++#define SCU0_RESET_PCIE0RSTOE 21 ++#define SCU0_RESET_JTAG 22 ++#define SCU0_RESET_MCTP0 23 ++#define SCU0_RESET_MCTP1 24 ++#define SCU0_RESET_XDMA0 25 ++#define SCU0_RESET_XDMA1 26 ++#define SCU0_RESET_H2X1 27 ++#define SCU0_RESET_DP 28 ++#define SCU0_RESET_DP_MCU 29 ++#define SCU0_RESET_SSP 30 ++#define SCU0_RESET_H2X0 31 ++#define SCU0_RESET_PORTA_VHUB 32 ++#define SCU0_RESET_PORTA_PHY3 33 ++#define SCU0_RESET_PORTA_XHCI 34 ++#define SCU0_RESET_PORTB_VHUB 35 ++#define SCU0_RESET_PORTB_PHY3 36 ++#define SCU0_RESET_PORTB_XHCI 37 ++#define SCU0_RESET_PORTA_VHUB_EHCI 38 ++#define SCU0_RESET_PORTB_VHUB_EHCI 39 ++#define SCU0_RESET_UHCI 40 ++#define SCU0_RESET_TSP 41 ++#define SCU0_RESET_E2M0 42 ++#define SCU0_RESET_E2M1 43 ++#define SCU0_RESET_VLINK 44 ++ ++/* SOC1 */ ++#define SCU1_RESET_LPC0 0 ++#define SCU1_RESET_LPC1 1 ++#define SCU1_RESET_MII 2 ++#define SCU1_RESET_PECI 3 ++#define SCU1_RESET_PWM 4 ++#define SCU1_RESET_MAC0 5 ++#define SCU1_RESET_MAC1 6 ++#define SCU1_RESET_MAC2 7 ++#define SCU1_RESET_ADC 8 ++#define SCU1_RESET_SD 9 ++#define SCU1_RESET_ESPI0 10 ++#define SCU1_RESET_ESPI1 11 ++#define SCU1_RESET_JTAG1 12 ++#define SCU1_RESET_SPI0 13 ++#define SCU1_RESET_SPI1 14 ++#define SCU1_RESET_SPI2 15 ++#define SCU1_RESET_I3C0 16 ++#define SCU1_RESET_I3C1 17 ++#define SCU1_RESET_I3C2 18 ++#define SCU1_RESET_I3C3 19 ++#define SCU1_RESET_I3C4 20 ++#define SCU1_RESET_I3C5 21 ++#define SCU1_RESET_I3C6 22 ++#define SCU1_RESET_I3C7 23 ++#define SCU1_RESET_I3C8 24 ++#define SCU1_RESET_I3C9 25 ++#define SCU1_RESET_I3C10 26 ++#define SCU1_RESET_I3C11 27 ++#define SCU1_RESET_I3C12 28 ++#define SCU1_RESET_I3C13 29 ++#define SCU1_RESET_I3C14 30 ++#define SCU1_RESET_I3C15 31 ++#define SCU1_RESET_MCU0 32 ++#define SCU1_RESET_MCU1 33 ++#define SCU1_RESET_H2A_SPI1 34 ++#define SCU1_RESET_H2A_SPI2 35 ++#define SCU1_RESET_UART0 36 ++#define SCU1_RESET_UART1 37 ++#define SCU1_RESET_UART2 38 ++#define SCU1_RESET_UART3 39 ++#define SCU1_RESET_I2C_FILTER 40 ++#define SCU1_RESET_CALIPTRA 41 ++#define SCU1_RESET_XDMA 42 ++#define SCU1_RESET_FSI 43 ++#define SCU1_RESET_CAN 44 ++#define SCU1_RESET_MCTP 45 ++#define SCU1_RESET_I2C 46 ++#define SCU1_RESET_UART6 47 ++#define SCU1_RESET_UART7 48 ++#define SCU1_RESET_UART8 49 ++#define SCU1_RESET_UART9 50 ++#define SCU1_RESET_LTPI0 51 ++#define SCU1_RESET_VGAL 52 ++#define SCU1_RESET_LTPI1 53 ++#define SCU1_RESET_ACE 54 ++#define SCU1_RESET_E2M 55 ++#define SCU1_RESET_UHCI 56 ++#define SCU1_RESET_PORTC_USB2UART 57 ++#define SCU1_RESET_PORTC_VHUB_EHCI 58 ++#define SCU1_RESET_PORTD_USB2UART 59 ++#define SCU1_RESET_PORTD_VHUB_EHCI 60 ++#define SCU1_RESET_H2X 61 ++#define SCU1_RESET_I3CDMA 62 ++#define SCU1_RESET_PCIE2RST 63 ++#define SCU1_RESET_XPCS 64 /* for AST2755 */ ++ ++#endif /* _MACH_ASPEED_AST2700_RESET_H_ */ +diff --git 
a/include/dt-bindings/watchdog/aspeed-wdt.h b/include/dt-bindings/watchdog/aspeed-wdt.h +--- a/include/dt-bindings/watchdog/aspeed-wdt.h 2025-08-01 08:48:47.000000000 +0000 ++++ b/include/dt-bindings/watchdog/aspeed-wdt.h 2025-12-23 10:16:21.313029502 +0000 +@@ -89,4 +89,142 @@ + + #define AST2600_WDT_RESET2_DEFAULT 0x03fffff1 + ++#define AST2700_WDT_RESET1_CPU (1 << 0) ++#define AST2700_WDT_RESET1_DRAM (1 << 1) ++#define AST2700_WDT_RESET1_SLI0 (1 << 2) ++#define AST2700_WDT_RESET1_EHCI (1 << 3) ++#define AST2700_WDT_RESET1_HACE (1 << 4) ++#define AST2700_WDT_RESET1_SOC_MISC0 (1 << 5) ++#define AST2700_WDT_RESET1_VIDEO (1 << 6) ++#define AST2700_WDT_RESET1_2D_GRAPHIC (1 << 7) ++#define AST2700_WDT_RESET1_RAVS0 (1 << 8) ++#define AST2700_WDT_RESET1_RAVS1 (1 << 9) ++#define AST2700_WDT_RESET1_GPIO0 (1 << 10) ++#define AST2700_WDT_RESET1_SSP (1 << 11) ++#define AST2700_WDT_RESET1_TSP (1 << 12) ++#define AST2700_WDT_RESET1_CRT (1 << 13) ++#define AST2700_WDT_RESET1_USB20_HOST (1 << 14) ++#define AST2700_WDT_RESET1_USB11_HOST (1 << 15) ++#define AST2700_WDT_RESET1_UFS (1 << 16) ++#define AST2700_WDT_RESET1_EMMC (1 << 17) ++#define AST2700_WDT_RESET1_AHB_TO_PCIE1 (1 << 18) ++#define AST2700_WDT_RESET1_XDMA0 (1 << 22) ++#define AST2700_WDT_RESET1_MCTP1 (1 << 23) ++#define AST2700_WDT_RESET1_MCTP0 (1 << 24) ++#define AST2700_WDT_RESET1_JTAG0 (1 << 25) ++#define AST2700_WDT_RESET1_ECC (1 << 26) ++#define AST2700_WDT_RESET1_XDMA1 (1 << 27) ++#define AST2700_WDT_RESET1_DP (1 << 28) ++#define AST2700_WDT_RESET1_DP_MCU (1 << 29) ++#define AST2700_WDT_RESET1_AHB_TO_PCIE0 (1 << 31) ++ ++#define AST2700_WDT_RESET1_DEFAULT 0x8207ff71 ++ ++#define AST2700_WDT_RESET2_USB3_A_HOST (1 << 0) ++#define AST2700_WDT_RESET2_USB3_A_VHUB3 (1 << 1) ++#define AST2700_WDT_RESET2_USB3_A_VHUB2 (1 << 2) ++#define AST2700_WDT_RESET2_USB3_B_HOST (1 << 3) ++#define AST2700_WDT_RESET2_USB3_B_VHUB3 (1 << 4) ++#define AST2700_WDT_RESET2_USB3_B_VHUB2 (1 << 5) ++#define AST2700_WDT_RESET2_SM3 (1 << 6) ++#define AST2700_WDT_RESET2_SM4 (1 << 7) ++#define AST2700_WDT_RESET2_SHA3 (1 << 8) ++#define AST2700_WDT_RESET2_RSA (1 << 9) ++ ++#define AST2700_WDT_RESET2_DEFAULT 0x000003f6 ++ ++#define AST2700_WDT_RESET3_LPC0 (1 << 0) ++#define AST2700_WDT_RESET3_LPC1 (1 << 1) ++#define AST2700_WDT_RESET3_MDIO (1 << 2) ++#define AST2700_WDT_RESET3_PECI (1 << 3) ++#define AST2700_WDT_RESET3_PWM (1 << 4) ++#define AST2700_WDT_RESET3_MAC0 (1 << 5) ++#define AST2700_WDT_RESET3_MAC1 (1 << 6) ++#define AST2700_WDT_RESET3_MAC2 (1 << 7) ++#define AST2700_WDT_RESET3_ADC (1 << 8) ++#define AST2700_WDT_RESET3_SDC (1 << 9) ++#define AST2700_WDT_RESET3_ESPI0 (1 << 10) ++#define AST2700_WDT_RESET3_ESPI1 (1 << 11) ++#define AST2700_WDT_RESET3_JTAG1 (1 << 12) ++#define AST2700_WDT_RESET3_SPI0 (1 << 13) ++#define AST2700_WDT_RESET3_SPI1 (1 << 14) ++#define AST2700_WDT_RESET3_SPI2 (1 << 15) ++#define AST2700_WDT_RESET3_I3C0 (1 << 16) ++#define AST2700_WDT_RESET3_I3C1 (1 << 17) ++#define AST2700_WDT_RESET3_I3C2 (1 << 18) ++#define AST2700_WDT_RESET3_I3C3 (1 << 19) ++#define AST2700_WDT_RESET3_I3C4 (1 << 20) ++#define AST2700_WDT_RESET3_I3C5 (1 << 21) ++#define AST2700_WDT_RESET3_I3C6 (1 << 22) ++#define AST2700_WDT_RESET3_I3C7 (1 << 23) ++#define AST2700_WDT_RESET3_I3C8 (1 << 24) ++#define AST2700_WDT_RESET3_I3C9 (1 << 25) ++#define AST2700_WDT_RESET3_I3C10 (1 << 26) ++#define AST2700_WDT_RESET3_I3C11 (1 << 27) ++#define AST2700_WDT_RESET3_I3C12 (1 << 28) ++#define AST2700_WDT_RESET3_I3C13 (1 << 29) ++#define AST2700_WDT_RESET3_I3C14 (1 << 30) ++#define 
AST2700_WDT_RESET3_I3C15 (1 << 31) ++ ++#define AST2700_WDT_RESET3_DEFAULT 0x000093ec ++ ++#define AST2700_WDT_RESET4_FMC (1 << 0) ++#define AST2700_WDT_RESET4_SOC_MISC1 (1 << 1) ++#define AST2700_WDT_RESET4_AHB (1 << 2) ++#define AST2700_WDT_RESET4_SLI1 (1 << 3) ++#define AST2700_WDT_RESET4_UART0 (1 << 4) ++#define AST2700_WDT_RESET4_UART1 (1 << 5) ++#define AST2700_WDT_RESET4_UART2 (1 << 6) ++#define AST2700_WDT_RESET4_UART3 (1 << 7) ++#define AST2700_WDT_RESET4_I2C_MONITOR (1 << 8) ++#define AST2700_WDT_RESET4_HOST_TO_SPI1 (1 << 9) ++#define AST2700_WDT_RESET4_HOST_TO_SPI2 (1 << 10) ++#define AST2700_WDT_RESET4_GPIO1 (1 << 11) ++#define AST2700_WDT_RESET4_FSI (1 << 12) ++#define AST2700_WDT_RESET4_CANBUS (1 << 13) ++#define AST2700_WDT_RESET4_MCTP (1 << 14) ++#define AST2700_WDT_RESET4_XDMA (1 << 15) ++#define AST2700_WDT_RESET4_UART5 (1 << 16) ++#define AST2700_WDT_RESET4_UART6 (1 << 17) ++#define AST2700_WDT_RESET4_UART7 (1 << 18) ++#define AST2700_WDT_RESET4_UART8 (1 << 19) ++#define AST2700_WDT_RESET4_BOOT_MCU (1 << 20) ++#define AST2700_WDT_RESET4_IO_MCU (1 << 21) ++#define AST2700_WDT_RESET4_LTPI0 (1 << 22) ++#define AST2700_WDT_RESET4_VGA_LINK (1 << 23) ++#define AST2700_WDT_RESET4_LTPI1 (1 << 24) ++#define AST2700_WDT_RESET4_LTPI_PHY (1 << 25) ++#define AST2700_WDT_RESET4_ACE (1 << 26) ++#define AST2700_WDT_RESET4_LTPI_GPIO0 (1 << 28) ++#define AST2700_WDT_RESET4_LTPI_GPIO1 (1 << 29) ++#define AST2700_WDT_RESET4_AHB_TO_PCIE1 (1 << 30) ++#define AST2700_WDT_RESET4_I3C_DMA (1 << 31) ++ ++#define AST2700_WDT_RESET4_DEFAULT 0x40303803 ++ ++#define AST2700_WDT_RESET5_I2C_GLOBAL (1 << 0) ++#define AST2700_WDT_RESET5_I2C0 (1 << 1) ++#define AST2700_WDT_RESET5_I2C1 (1 << 2) ++#define AST2700_WDT_RESET5_I2C2 (1 << 3) ++#define AST2700_WDT_RESET5_I2C3 (1 << 4) ++#define AST2700_WDT_RESET5_I2C4 (1 << 5) ++#define AST2700_WDT_RESET5_I2C5 (1 << 6) ++#define AST2700_WDT_RESET5_I2C6 (1 << 7) ++#define AST2700_WDT_RESET5_I2C7 (1 << 8) ++#define AST2700_WDT_RESET5_I2C8 (1 << 9) ++#define AST2700_WDT_RESET5_I2C9 (1 << 10) ++#define AST2700_WDT_RESET5_I2C10 (1 << 11) ++#define AST2700_WDT_RESET5_I2C11 (1 << 12) ++#define AST2700_WDT_RESET5_I2C12 (1 << 13) ++#define AST2700_WDT_RESET5_I2C13 (1 << 14) ++#define AST2700_WDT_RESET5_I2C14 (1 << 15) ++#define AST2700_WDT_RESET5_I2C15 (1 << 16) ++#define AST2700_WDT_RESET5_UHCI (1 << 17) ++#define AST2700_WDT_RESET5_USB2_C_UART (1 << 18) ++#define AST2700_WDT_RESET5_USB2_C (1 << 19) ++#define AST2700_WDT_RESET5_USB2_D_UART (1 << 20) ++#define AST2700_WDT_RESET5_USB2_D (1 << 21) ++ ++#define AST2700_WDT_RESET5_DEFAULT 0x00320000 ++ + #endif +diff --git a/include/linux/aspeed-mctp.h b/include/linux/aspeed-mctp.h +--- a/include/linux/aspeed-mctp.h 1970-01-01 00:00:00.000000000 +0000 ++++ b/include/linux/aspeed-mctp.h 2025-12-23 10:16:21.266030290 +0000 +@@ -0,0 +1,157 @@ ++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ ++/* Copyright (c) 2025 ASPEED Tech */ ++ ++#ifndef __LINUX_ASPEED_MCTP_H ++#define __LINUX_ASPEED_MCTP_H ++ ++#include ++ ++struct mctp_client; ++struct aspeed_mctp; ++ ++struct pcie_transport_hdr { ++ u8 fmt_type; ++ u8 mbz; ++ u8 mbz_attr_len_hi; ++ u8 len_lo; ++ u16 requester; ++ u8 tag; ++ u8 code; ++ u16 target; ++ u16 vendor; ++} __packed; ++ ++struct mctp_protocol_hdr { ++ u8 ver; ++ u8 dest; ++ u8 src; ++ u8 flags_seq_tag; ++} __packed; ++ ++#define PCIE_VDM_HDR_SIZE 16 ++#define MCTP_BTU_SIZE 64 ++/* The MTU of the ASPEED MCTP can be 64/128/256 */ ++#define ASPEED_MCTP_MTU MCTP_BTU_SIZE ++#define 
PCIE_VDM_DATA_SIZE_DW (ASPEED_MCTP_MTU / 4) ++#define PCIE_VDM_HDR_SIZE_DW (PCIE_VDM_HDR_SIZE / 4) ++ ++#define PCIE_MCTP_MIN_PACKET_SIZE (PCIE_VDM_HDR_SIZE + 4) ++ ++struct mctp_pcie_packet_data_2500 { ++ u32 data[32]; ++}; ++ ++struct mctp_pcie_packet_data { ++ u32 hdr[PCIE_VDM_HDR_SIZE_DW]; ++ u32 payload[PCIE_VDM_DATA_SIZE_DW]; ++}; ++ ++struct mctp_pcie_packet { ++ struct mctp_pcie_packet_data data; ++ u32 size; ++}; ++ ++/** ++ * aspeed_mctp_add_type_handler() - register for the given MCTP message type ++ * @client: pointer to the existing mctp_client context ++ * @mctp_type: message type code according to DMTF DSP0239 spec. ++ * @pci_vendor_id: vendor ID (non-zero if msg_type is Vendor Defined PCI, ++ * otherwise it should be set to 0) ++ * @vdm_type: vendor defined message type (it should be set to 0 for non-Vendor ++ * Defined PCI message type) ++ * @vdm_mask: vendor defined message mask (it should be set to 0 for non-Vendor ++ * Defined PCI message type) ++ * ++ * Return: ++ * * 0 - success, ++ * * -EINVAL - arguments passed are incorrect, ++ * * -ENOMEM - cannot alloc a new handler, ++ * * -EBUSY - given message has already registered handler. ++ */ ++ ++int aspeed_mctp_add_type_handler(struct mctp_client *client, u8 mctp_type, ++ u16 pci_vendor_id, u16 vdm_type, u16 vdm_mask); ++ ++/** ++ * aspeed_mctp_create_client() - create mctp_client context ++ * @priv pointer to aspeed-mctp context ++ * ++ * Returns struct mctp_client or NULL. ++ */ ++struct mctp_client *aspeed_mctp_create_client(struct aspeed_mctp *priv); ++ ++/** ++ * aspeed_mctp_delete_client()- delete mctp_client context ++ * @client: pointer to existing mctp_client context ++ */ ++void aspeed_mctp_delete_client(struct mctp_client *client); ++ ++/** ++ * aspeed_mctp_send_packet() - send mctp_packet ++ * @client: pointer to existing mctp_client context ++ * @tx_packet: the allocated packet that needs to be send via aspeed-mctp ++ * ++ * After the function returns success, the packet is no longer owned by the ++ * caller, and as such, the caller should not attempt to free it. ++ * ++ * Return: ++ * * 0 - success, ++ * * -ENOSPC - failed to send packet due to lack of available space. ++ */ ++int aspeed_mctp_send_packet(struct mctp_client *client, ++ struct mctp_pcie_packet *tx_packet); ++ ++/** ++ * aspeed_mctp_receive_packet() - receive mctp_packet ++ * @client: pointer to existing mctp_client context ++ * @timeout: timeout, in jiffies ++ * ++ * The function will sleep for up to @timeout if no packet is ready to read. ++ * ++ * After the function returns valid packet, the caller takes its ownership and ++ * is responsible for freeing it. ++ * ++ * Returns struct mctp_pcie_packet from or ERR_PTR in case of error or the ++ * @timeout elapsed. ++ */ ++struct mctp_pcie_packet *aspeed_mctp_receive_packet(struct mctp_client *client, ++ unsigned long timeout); ++ ++/** ++ * aspeed_mctp_flush_rx_queue() - remove all mctp_packets from rx queue ++ * @client: pointer to existing mctp_client context ++ */ ++void aspeed_mctp_flush_rx_queue(struct mctp_client *client); ++ ++/** ++ * aspeed_mctp_get_eid_bdf() - return PCIe address for requested endpoint ID ++ * @client: pointer to existing mctp_client context ++ * @eid: requested eid ++ * @bdf: pointer to store BDF value ++ * ++ * Return: ++ * * 0 - success, ++ * * -ENOENT - there is no record for requested endpoint id. ++ */ ++int aspeed_mctp_get_eid_bdf(struct mctp_client *client, u8 eid, u16 *bdf); ++ ++/** ++ * aspeed_mctp_get_eid() - return EID for requested BDF and domainId. 
++ * @client: pointer to existing mctp_client context ++ * @bdf: requested BDF value ++ * @domain_id: requested domainId ++ * @eid: pointer to store EID value ++ * ++ * Return: ++ * * 0 - success, ++ * * -ENOENT - there is no record for requested bdf/domainId. ++ */ ++int aspeed_mctp_get_eid(struct mctp_client *client, u16 bdf, ++ u8 domain_id, u8 *eid); ++ ++void *aspeed_mctp_packet_alloc(gfp_t flags); ++void aspeed_mctp_packet_free(void *packet); ++ ++int aspeed_mctp_register_default_handler(struct mctp_client *client); ++ ++#endif /* __LINUX_ASPEED_MCTP_H */ +diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h +--- a/include/linux/clk-provider.h 2025-08-01 08:48:47.000000000 +0000 ++++ b/include/linux/clk-provider.h 2025-12-23 10:16:22.000017988 +0000 +@@ -623,6 +623,24 @@ + NULL, (flags), (reg), (bit_idx), \ + (clk_gate_flags), (lock)) + /** ++ * devm_clk_hw_register_gate_parent_hw - register a gate clock with the clock ++ * framework ++ * @dev: device that is registering this clock ++ * @name: name of this clock ++ * @parent_hw: pointer to parent clk ++ * @flags: framework-specific flags for this clock ++ * @reg: register address to control gating of this clock ++ * @bit_idx: which bit in the register controls gating of this clock ++ * @clk_gate_flags: gate-specific flags for this clock ++ * @lock: shared register lock for this clock ++ */ ++#define devm_clk_hw_register_gate_parent_hw(dev, name, parent_hw, flags, \ ++ reg, bit_idx, clk_gate_flags, \ ++ lock) \ ++ __devm_clk_hw_register_gate((dev), NULL, (name), NULL, (parent_hw), \ ++ NULL, (flags), (reg), (bit_idx), \ ++ (clk_gate_flags), (lock)) ++/** + * devm_clk_hw_register_gate_parent_data - register a gate clock with the + * clock framework + * @dev: device that is registering this clock +diff --git a/include/linux/mctp-pcie-vdm.h b/include/linux/mctp-pcie-vdm.h +--- a/include/linux/mctp-pcie-vdm.h 1970-01-01 00:00:00.000000000 +0000 ++++ b/include/linux/mctp-pcie-vdm.h 2025-12-23 10:16:21.973018440 +0000 +@@ -0,0 +1,36 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * mctp-pcie-vdm.h - MCTP-over-PCIe-VDM (DMTF DSP0238) transport binding Interface ++ * for PCIe VDM devices to register and implement. ++ * ++ */ ++ ++#ifndef __LINUX_MCTP_PCIE_VDM_H ++#define __LINUX_MCTP_PCIE_VDM_H ++ ++#include ++#include ++ ++#ifdef CONFIG_MCTP_TRANSPORT_PCIE_VDM ++ ++/** ++ * @send_packet: referenced to send packets with PCIe VDM header packed. ++ * @recv_packet: referenced multiple times until no RX packet to be handled. ++ * received pointer shall start from the PCIe VDM header. ++ * @free_packet: referenced when the packet is processed and okay to be freed. ++ * @uninit: uninitialize the device. 
++ */ ++struct mctp_pcie_vdm_ops { ++ int (*send_packet)(struct device *dev, u8 *data, size_t len); ++ u8 *(*recv_packet)(struct device *dev); ++ void (*free_packet)(void *packet); ++ void (*uninit)(struct device *dev); ++}; ++ ++struct net_device *mctp_pcie_vdm_add_dev(struct device *dev, ++ const struct mctp_pcie_vdm_ops *ops); ++void mctp_pcie_vdm_receive_packet(struct net_device *ndev); ++void mctp_pcie_vdm_remove_dev(struct net_device *ndev); ++ ++#endif /* CONFIG_MCTP_TRANSPORT_PCIE_VDM */ ++#endif /* __LINUX_MCTP_PCIE_VDM_H */ +diff --git a/include/linux/pwm.h b/include/linux/pwm.h +--- a/include/linux/pwm.h 2025-08-01 08:48:47.000000000 +0000 ++++ b/include/linux/pwm.h 2025-12-23 10:16:22.035017401 +0000 +@@ -405,6 +405,9 @@ + int __devm_pwmchip_add(struct device *dev, struct pwm_chip *chip, struct module *owner); + #define devm_pwmchip_add(dev, chip) __devm_pwmchip_add(dev, chip, THIS_MODULE) + ++struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip, ++ unsigned int index, const char *label); ++ + struct pwm_device *of_pwm_xlate_with_flags(struct pwm_chip *chip, + const struct of_phandle_args *args); + struct pwm_device *of_pwm_single_xlate(struct pwm_chip *chip, +diff --git a/include/linux/soc/aspeed/aspeed-otp.h b/include/linux/soc/aspeed/aspeed-otp.h +--- a/include/linux/soc/aspeed/aspeed-otp.h 1970-01-01 00:00:00.000000000 +0000 ++++ b/include/linux/soc/aspeed/aspeed-otp.h 2025-12-23 10:16:21.257030440 +0000 +@@ -0,0 +1,12 @@ ++/* SPDX-License-Identifier: GPL-2.0-or-later WITH Linux-syscall-note */ ++/* ++ * Copyright (C) 2021 ASPEED Technology Inc. ++ */ ++ ++#ifndef _LINUX_ASPEED_OTP_H ++#define _LINUX_ASPEED_OTP_H ++ ++void otp_read_data_buf(u32 offset, u32 *buf, u32 len); ++void otp_read_conf_buf(u32 offset, u32 *buf, u32 len); ++ ++#endif /* _LINUX_ASPEED_OTP_H */ +diff --git a/include/linux/soc/aspeed/aspeed-udma.h b/include/linux/soc/aspeed/aspeed-udma.h +--- a/include/linux/soc/aspeed/aspeed-udma.h 1970-01-01 00:00:00.000000000 +0000 ++++ b/include/linux/soc/aspeed/aspeed-udma.h 2025-12-23 10:16:21.252030524 +0000 +@@ -0,0 +1,28 @@ ++#ifndef __ASPEED_UDMA_H__ ++#define __ASPEED_UDMA_H__ ++ ++typedef void (*aspeed_udma_cb_t)(int fifo_rwptr, void *id); ++ ++enum aspeed_udma_ops { ++ ASPEED_UDMA_OP_ENABLE, ++ ASPEED_UDMA_OP_DISABLE, ++ ASPEED_UDMA_OP_RESET, ++}; ++ ++void aspeed_udma_set_tx_wptr(u32 ch_no, u32 wptr); ++void aspeed_udma_set_rx_rptr(u32 ch_no, u32 rptr); ++ ++void aspeed_udma_tx_chan_ctrl(u32 ch_no, enum aspeed_udma_ops op); ++void aspeed_udma_rx_chan_ctrl(u32 ch_no, enum aspeed_udma_ops op); ++ ++int aspeed_udma_request_tx_chan(u32 ch_no, dma_addr_t addr, ++ struct kfifo *fifo, u32 fifo_sz, ++ aspeed_udma_cb_t cb, void *id, bool en_tmout); ++int aspeed_udma_request_rx_chan(u32 ch_no, dma_addr_t addr, ++ struct kfifo *fifo, u32 fifo_sz, ++ aspeed_udma_cb_t cb, void *id, bool en_tmout); ++ ++int aspeed_udma_free_tx_chan(u32 ch_no); ++int aspeed_udma_free_rx_chan(u32 ch_no); ++ ++#endif +diff --git a/include/soc/aspeed/reset-aspeed.h b/include/soc/aspeed/reset-aspeed.h +--- a/include/soc/aspeed/reset-aspeed.h 1970-01-01 00:00:00.000000000 +0000 ++++ b/include/soc/aspeed/reset-aspeed.h 2025-12-23 10:16:21.214031161 +0000 +@@ -0,0 +1,21 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Copyright (c) 2024 ASPEED Technology Inc. 
++ * Author: Ryan Chen ++ */ ++ ++#ifndef __RESET_ASPEED_H__ ++#define __RESET_ASPEED_H__ ++ ++#if IS_ENABLED(CONFIG_RESET_ASPEED) ++int aspeed_reset_controller_register(struct device *clk_dev, void __iomem *base, ++ const char *adev_name); ++#else ++static inline int aspeed_reset_controller_register(struct device *clk_dev, void __iomem *base, ++ const char *adev_name) ++{ ++ return -ENODEV; ++} ++#endif /* if IS_ENABLED(CONFIG_RESET_ASPEED) */ ++ ++#endif /* __RESET_ASPEED_H__ */ +diff --git a/include/trace/events/xdma.h b/include/trace/events/xdma.h +--- a/include/trace/events/xdma.h 1970-01-01 00:00:00.000000000 +0000 ++++ b/include/trace/events/xdma.h 2025-12-23 10:16:21.990018155 +0000 +@@ -0,0 +1,126 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++ ++#undef TRACE_SYSTEM ++#define TRACE_SYSTEM xdma ++ ++#if !defined(_TRACE_XDMA_H) || defined(TRACE_HEADER_MULTI_READ) ++#define _TRACE_XDMA_H ++ ++#include <linux/tracepoint.h> ++ ++TRACE_EVENT(xdma_start, ++ TP_PROTO(const struct aspeed_xdma *ctx, const struct aspeed_xdma_cmd *cmd), ++ TP_ARGS(ctx, cmd), ++ TP_STRUCT__entry(__field(bool, dir_upstream) ++ __field(unsigned int, index) ++ __field(__u64, host) ++ __field(__u64, pitch) ++ __field(__u64, cmd) ++ ), ++ TP_fast_assign(__entry->dir_upstream = ctx->upstream; ++ __entry->index = ctx->cmd_idx; ++ __entry->host = cmd->host_addr; ++ __entry->pitch = cmd->pitch; ++ __entry->cmd = cmd->cmd; ++ ), ++ TP_printk("%s cmd:%u [%08llx %016llx %016llx]", ++ __entry->dir_upstream ? "upstream" : "downstream", ++ __entry->index, ++ __entry->host, ++ __entry->pitch, ++ __entry->cmd ++ ) ++); ++ ++TRACE_EVENT(xdma_irq, ++ TP_PROTO(u32 sts), ++ TP_ARGS(sts), ++ TP_STRUCT__entry(__field(__u32, status) ++ ), ++ TP_fast_assign(__entry->status = sts; ++ ), ++ TP_printk("sts:%08x", ++ __entry->status ++ ) ++); ++ ++TRACE_EVENT(xdma_reset, ++ TP_PROTO(const struct aspeed_xdma *ctx), ++ TP_ARGS(ctx), ++ TP_STRUCT__entry(__field(bool, dir_upstream) ++ __field(bool, in_progress) ++ ), ++ TP_fast_assign(__entry->dir_upstream = ctx->upstream; ++ __entry->in_progress = ++ ctx->current_client ? ctx->current_client->in_progress : false; ++ ), ++ TP_printk("%sin progress%s", ++ __entry->in_progress ? "" : "not ", ++ __entry->in_progress ? (__entry->dir_upstream ? ++ " upstream" : " downstream") : "" ++ ) ++); ++ ++TRACE_EVENT(xdma_perst, ++ TP_PROTO(const struct aspeed_xdma *ctx), ++ TP_ARGS(ctx), ++ TP_STRUCT__entry(__field(bool, in_reset) ++ ), ++ TP_fast_assign(__entry->in_reset = ctx->in_reset; ++ ), ++ TP_printk("%s", ++ __entry->in_reset ?
"in reset" : "" ++ ) ++); ++ ++TRACE_EVENT(xdma_unmap, ++ TP_PROTO(const struct aspeed_xdma_client *client), ++ TP_ARGS(client), ++ TP_STRUCT__entry(__field(__u32, phys) ++ __field(__u32, size) ++ ), ++ TP_fast_assign(__entry->phys = client->phys; ++ __entry->size = client->size; ++ ), ++ TP_printk("p:%08x s:%08x", ++ __entry->phys, ++ __entry->size ++ ) ++); ++ ++TRACE_EVENT(xdma_mmap_error, ++ TP_PROTO(const struct aspeed_xdma_client *client, unsigned long vm_start), ++ TP_ARGS(client, vm_start), ++ TP_STRUCT__entry(__field(__u32, phys) ++ __field(__u32, size) ++ __field(unsigned long, vm_start) ++ ), ++ TP_fast_assign(__entry->phys = client->phys; ++ __entry->size = client->size; ++ __entry->vm_start = vm_start; ++ ), ++ TP_printk("p:%08x s:%08x v:%08lx", ++ __entry->phys, ++ __entry->size, ++ __entry->vm_start ++ ) ++); ++ ++TRACE_EVENT(xdma_mmap, ++ TP_PROTO(const struct aspeed_xdma_client *client), ++ TP_ARGS(client), ++ TP_STRUCT__entry(__field(__u32, phys) ++ __field(__u32, size) ++ ), ++ TP_fast_assign(__entry->phys = client->phys; ++ __entry->size = client->size; ++ ), ++ TP_printk("p:%08x s:%08x", ++ __entry->phys, ++ __entry->size ++ ) ++); ++ ++#endif /* _TRACE_XDMA_H */ ++ ++#include <trace/define_trace.h> +diff --git a/include/uapi/linux/aspeed-mctp.h b/include/uapi/linux/aspeed-mctp.h +--- a/include/uapi/linux/aspeed-mctp.h 1970-01-01 00:00:00.000000000 +0000 ++++ b/include/uapi/linux/aspeed-mctp.h 2025-12-23 10:16:21.247030608 +0000 +@@ -0,0 +1,136 @@ ++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ ++/* Copyright (c) 2020 Intel Corporation */ ++ ++#ifndef _UAPI_LINUX_ASPEED_MCTP_H ++#define _UAPI_LINUX_ASPEED_MCTP_H ++ ++#include <linux/ioctl.h> ++#include <linux/types.h> ++ ++/* ++ * aspeed-mctp is a simple device driver exposing a read/write interface: ++ * +----------------------+ ++ * | PCIe VDM Header | 16 bytes (Big Endian) ++ * +----------------------+ ++ * | MCTP Message Payload | 64/128/256/512 bytes (Big Endian) ++ * +----------------------+ ++ * ++ * MCTP packet description can be found in DMTF DSP0238, ++ * MCTP PCIe VDM Transport Specification.
++ */ ++ ++#define ASPEED_MCTP_PCIE_VDM_HDR_SIZE 16 ++ ++/* ++ * uevents generated by aspeed-mctp driver ++ */ ++#define ASPEED_MCTP_READY "PCIE_READY" ++ ++/* ++ * maximum possible number of struct eid_info elements stored in list ++ */ ++#define ASPEED_MCTP_EID_INFO_MAX 256 ++ ++/* ++ * MCTP operations ++ * @ASPEED_MCTP_IOCTL_FILTER_EID: enable/disable filter incoming packets based ++ * on Endpoint ID (BROKEN) ++ * @ASPEED_MCTP_IOCTL_GET_BDF: read PCI bus/device/function of MCTP Controller ++ * @ASPEED_MCTP_IOCTL_GET_MEDIUM_ID: read MCTP physical medium identifier ++ * related to PCIe revision ++ * @ASPEED_MCTP_IOCTL_GET_MTU: read max transmission unit (in bytes) ++ * @ASPEED_MCTP_IOCTL_REGISTER_DEFAULT_HANDLER Register client as default ++ * handler that receives all MCTP messages that were not dispatched to other ++ * clients ++ * @ASPEED_MCTP_IOCTL_REGISTER_TYPE_HANDLER Register client to receive all ++ * messages of specified MCTP type or PCI vendor defined type ++ * @ASPEED_MCTP_IOCTL_UNREGISTER_TYPE_HANDLER Unregister client as handler ++ * for specified MCTP type or PCI vendor defined message type ++ * @ASPEED_MCTP_GET_EID_INFO - deprecated, use ASPEED_MCTP_GET_EID_EXT instead ++ * @ASPEED_MCTP_SET_EID_INFO - deprecated, use ASPEED_MCTP_SET_EID_EXT instead ++ * @ASPEED_MCTP_GET_EID_EXT_INFO: read list of existing CPU EID and Domain ID ++ * mappings and return count which is lesser of the two: requested count and existing count ++ * @ASPEED_MCTP_SET_EID_EXT_INFO: write or overwrite already existing list of ++ * CPU EID and Domain ID mappings ++ * @ASPEED_MCTP_SET_OWN_EID: write/overwrite own EID information ++ */ ++ ++struct aspeed_mctp_filter_eid { ++ __u8 eid; ++ bool enable; ++}; ++ ++struct aspeed_mctp_get_bdf { ++ __u16 bdf; ++}; ++ ++struct aspeed_mctp_get_medium_id { ++ __u8 medium_id; ++}; ++ ++struct aspeed_mctp_get_mtu { ++ __u16 mtu; ++}; ++ ++struct aspeed_mctp_type_handler_ioctl { ++ __u8 mctp_type; /* MCTP message type as per DSP239*/ ++ /* Below params must be 0 if mctp_type is not Vendor Defined PCI */ ++ __u16 pci_vendor_id; /* PCI Vendor ID */ ++ __u16 vendor_type; /* Vendor specific type */ ++ __u16 vendor_type_mask; /* Mask applied to vendor type */ ++}; ++ ++struct aspeed_mctp_eid_info { ++ __u8 eid; ++ __u16 bdf; ++}; ++ ++struct aspeed_mctp_eid_ext_info { ++ __u8 eid; ++ __u16 bdf; ++ __u8 domain_id; ++}; ++ ++struct aspeed_mctp_get_eid_info { ++ __u64 ptr; ++ __u16 count; ++ __u8 start_eid; ++}; ++ ++struct aspeed_mctp_set_eid_info { ++ __u64 ptr; ++ __u16 count; ++}; ++ ++struct aspeed_mctp_set_own_eid { ++ __u8 eid; ++}; ++ ++#define ASPEED_MCTP_IOCTL_BASE 0x4d ++ ++#define ASPEED_MCTP_IOCTL_FILTER_EID \ ++ _IOW(ASPEED_MCTP_IOCTL_BASE, 0, struct aspeed_mctp_filter_eid) ++#define ASPEED_MCTP_IOCTL_GET_BDF \ ++ _IOR(ASPEED_MCTP_IOCTL_BASE, 1, struct aspeed_mctp_get_bdf) ++#define ASPEED_MCTP_IOCTL_GET_MEDIUM_ID \ ++ _IOR(ASPEED_MCTP_IOCTL_BASE, 2, struct aspeed_mctp_get_medium_id) ++#define ASPEED_MCTP_IOCTL_GET_MTU \ ++ _IOR(ASPEED_MCTP_IOCTL_BASE, 3, struct aspeed_mctp_get_mtu) ++#define ASPEED_MCTP_IOCTL_REGISTER_DEFAULT_HANDLER \ ++ _IO(ASPEED_MCTP_IOCTL_BASE, 4) ++#define ASPEED_MCTP_IOCTL_REGISTER_TYPE_HANDLER \ ++ _IOW(ASPEED_MCTP_IOCTL_BASE, 6, struct aspeed_mctp_type_handler_ioctl) ++#define ASPEED_MCTP_IOCTL_UNREGISTER_TYPE_HANDLER \ ++ _IOW(ASPEED_MCTP_IOCTL_BASE, 7, struct aspeed_mctp_type_handler_ioctl) ++#define ASPEED_MCTP_IOCTL_GET_EID_INFO \ ++ _IOWR(ASPEED_MCTP_IOCTL_BASE, 8, struct aspeed_mctp_get_eid_info) ++#define 
ASPEED_MCTP_IOCTL_SET_EID_INFO \ ++ _IOW(ASPEED_MCTP_IOCTL_BASE, 9, struct aspeed_mctp_set_eid_info) ++#define ASPEED_MCTP_IOCTL_GET_EID_EXT_INFO \ ++ _IOW(ASPEED_MCTP_IOCTL_BASE, 10, struct aspeed_mctp_get_eid_info) ++#define ASPEED_MCTP_IOCTL_SET_EID_EXT_INFO \ ++ _IOW(ASPEED_MCTP_IOCTL_BASE, 11, struct aspeed_mctp_set_eid_info) ++#define ASPEED_MCTP_IOCTL_SET_OWN_EID \ ++ _IOW(ASPEED_MCTP_IOCTL_BASE, 12, struct aspeed_mctp_set_own_eid) ++ ++#endif /* _UAPI_LINUX_ASPEED_MCTP_H */ +diff --git a/include/uapi/linux/aspeed-video.h b/include/uapi/linux/aspeed-video.h +--- a/include/uapi/linux/aspeed-video.h 2025-08-01 08:48:47.000000000 +0000 ++++ b/include/uapi/linux/aspeed-video.h 2025-12-23 10:16:21.223031010 +0000 +@@ -8,6 +8,15 @@ + + #include <linux/v4l2-controls.h> + ++/* aspeed video's input types */ ++enum aspeed_video_input { ++ VIDEO_INPUT_VGA = 0, ++ VIDEO_INPUT_GFX, ++ VIDEO_INPUT_MEM, ++ VIDEO_INPUT_DVI, ++ VIDEO_INPUT_MAX ++}; ++ + #define V4L2_CID_ASPEED_HQ_MODE (V4L2_CID_USER_ASPEED_BASE + 1) + #define V4L2_CID_ASPEED_HQ_JPEG_QUALITY (V4L2_CID_USER_ASPEED_BASE + 2) + +diff --git a/include/uapi/linux/aspeed-xdma.h b/include/uapi/linux/aspeed-xdma.h +--- a/include/uapi/linux/aspeed-xdma.h 1970-01-01 00:00:00.000000000 +0000 ++++ b/include/uapi/linux/aspeed-xdma.h 2025-12-23 10:16:21.233030843 +0000 +@@ -0,0 +1,42 @@ ++/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ ++/* Copyright IBM Corp 2019 */ ++ ++#ifndef _UAPI_LINUX_ASPEED_XDMA_H_ ++#define _UAPI_LINUX_ASPEED_XDMA_H_ ++ ++#include <linux/ioctl.h> ++#include <linux/types.h> ++ ++#define __ASPEED_XDMA_IOCTL_MAGIC 0xb7 ++#define ASPEED_XDMA_IOCTL_RESET _IO(__ASPEED_XDMA_IOCTL_MAGIC, 0) ++ ++/* ++ * aspeed_xdma_direction ++ * ++ * ASPEED_XDMA_DIRECTION_DOWNSTREAM: transfers data from the host to the BMC ++ * ++ * ASPEED_XDMA_DIRECTION_UPSTREAM: transfers data from the BMC to the host ++ */ ++enum aspeed_xdma_direction { ++ ASPEED_XDMA_DIRECTION_DOWNSTREAM = 0, ++ ASPEED_XDMA_DIRECTION_UPSTREAM, ++}; ++ ++/* ++ * aspeed_xdma_op ++ * ++ * host_addr: the DMA address on the host side, typically configured by PCI ++ * subsystem ++ * ++ * len: the size of the transfer in bytes ++ * ++ * direction: an enumerator indicating the direction of the DMA operation; see ++ * enum aspeed_xdma_direction ++ */ ++struct aspeed_xdma_op { ++ __u64 host_addr; ++ __u32 len; ++ __u32 direction; ++}; ++ ++#endif /* _UAPI_LINUX_ASPEED_XDMA_H_ */ +diff --git a/include/uapi/linux/otp_ast2600.h b/include/uapi/linux/otp_ast2600.h +--- a/include/uapi/linux/otp_ast2600.h 1970-01-01 00:00:00.000000000 +0000 ++++ b/include/uapi/linux/otp_ast2600.h 2025-12-23 10:16:21.238030759 +0000 +@@ -0,0 +1,39 @@ ++/* SPDX-License-Identifier: GPL-2.0-or-later WITH Linux-syscall-note */ ++/* ++ * Copyright (C) 2021 ASPEED Technology Inc.
++ */ ++ ++#ifndef _UAPI_LINUX_OTP_AST2600_H ++#define _UAPI_LINUX_OTP_AST2600_H ++ ++#include <linux/ioctl.h> ++#include <linux/types.h> ++ ++struct otp_read { ++ unsigned int offset; ++ unsigned int len; ++ unsigned int *data; ++}; ++ ++struct otp_prog { ++ unsigned int dw_offset; ++ unsigned int bit_offset; ++ unsigned int value; ++}; ++ ++#define OTP_A0 0 ++#define OTP_A1 1 ++#define OTP_A2 2 ++#define OTP_A3 3 ++ ++#define OTPIOC_BASE 'O' ++ ++#define ASPEED_OTP_READ_DATA _IOR(OTPIOC_BASE, 0, struct otp_read) ++#define ASPEED_OTP_READ_CONF _IOR(OTPIOC_BASE, 1, struct otp_read) ++#define ASPEED_OTP_PROG_DATA _IOW(OTPIOC_BASE, 2, struct otp_prog) ++#define ASPEED_OTP_PROG_CONF _IOW(OTPIOC_BASE, 3, struct otp_prog) ++#define ASPEED_OTP_VER _IOR(OTPIOC_BASE, 4, unsigned int) ++#define ASPEED_OTP_SW_RID _IOR(OTPIOC_BASE, 5, u32 *) ++#define ASPEED_SEC_KEY_NUM _IOR(OTPIOC_BASE, 6, u32 *) ++ ++#endif /* _UAPI_LINUX_OTP_AST2600_H */ +diff --git a/include/uapi/linux/otp_ast2700.h b/include/uapi/linux/otp_ast2700.h +--- a/include/uapi/linux/otp_ast2700.h 1970-01-01 00:00:00.000000000 +0000 ++++ b/include/uapi/linux/otp_ast2700.h 2025-12-23 10:16:21.218031094 +0000 +@@ -0,0 +1,47 @@ ++/* SPDX-License-Identifier: GPL-2.0-or-later WITH Linux-syscall-note */ ++/* ++ * Copyright (C) 2021 ASPEED Technology Inc. ++ */ ++ ++#ifndef _UAPI_LINUX_OTP_AST2700_H ++#define _UAPI_LINUX_OTP_AST2700_H ++ ++#include <linux/ioctl.h> ++#include <linux/types.h> ++ ++struct otp_read { ++ unsigned int offset; ++ unsigned int len; ++ uint8_t *data; ++}; ++ ++struct otp_prog { ++ unsigned int w_offset; ++ unsigned int len; ++ uint8_t *data; ++}; ++ ++struct otp_revid { ++ uint32_t revid0; ++ uint32_t revid1; ++}; ++ ++#define OTP_A0 0 ++#define OTP_A1 1 ++#define OTP_A2 2 ++#define OTP_A3 3 ++ ++#define OTPIOC_BASE 'O' ++ ++#define ASPEED_OTP_READ_DATA _IOR(OTPIOC_BASE, 0, struct otp_read) ++#define ASPEED_OTP_READ_CONF _IOR(OTPIOC_BASE, 1, struct otp_read) ++#define ASPEED_OTP_PROG_DATA _IOW(OTPIOC_BASE, 2, struct otp_prog) ++#define ASPEED_OTP_PROG_CONF _IOW(OTPIOC_BASE, 3, struct otp_prog) ++#define ASPEED_OTP_VER _IOR(OTPIOC_BASE, 4, unsigned int) ++#define ASPEED_OTP_SW_RID _IOR(OTPIOC_BASE, 5, u32 *) ++#define ASPEED_SEC_KEY_NUM _IOR(OTPIOC_BASE, 6, u32 *) ++#define ASPEED_OTP_GET_ECC _IOR(OTPIOC_BASE, 7, uint32_t) ++#define ASPEED_OTP_SET_ECC _IO(OTPIOC_BASE, 8) ++#define ASPEED_OTP_GET_REVID _IOR(OTPIOC_BASE, 9, struct otp_revid) ++ ++#endif /* _UAPI_LINUX_OTP_AST2700_H */ +diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h +--- a/include/uapi/linux/videodev2.h 2025-08-01 08:48:47.000000000 +0000 ++++ b/include/uapi/linux/videodev2.h 2025-12-23 10:16:22.013017770 +0000 +@@ -880,6 +880,7 @@ + /* Flags */ + #define V4L2_PIX_FMT_FLAG_PREMUL_ALPHA 0x00000001 + #define V4L2_PIX_FMT_FLAG_SET_CSC 0x00000002 ++#define V4L2_PIX_FMT_FLAG_PARTIAL_JPG 0x00000004 + + /* + * F O R M A T E N U M E R A T I O N diff --git a/patches-sonic/series b/patches-sonic/series index 8d862b559..79e0bbcab 100644 --- a/patches-sonic/series +++ b/patches-sonic/series @@ -219,6 +219,10 @@ cisco-npu-disable-other-bars.patch 0011-PCI-AER-Fix-NULL-pointer-access-by-aer_info.patch 0012-PCI-AER-Avoid-NULL-pointer-dereference-in-aer_rateli.patch +###-> aspeed +aspeed-ast2700-support.patch +###-> aspeed-end + # # ############################################################